repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
pientaa/opening-black-box | ["9fa3ecd5298a940893f289550d441a38b0bee71d"] | ["monitor-manager/monitor-manager.py"] | [
"import json\r\nimport logging\r\nimport pandas as pd\r\nimport requests\r\nimport socket\r\nimport time\r\nfrom csv import reader\r\nfrom flask import Flask\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected]('/')\r\ndef health_check():\r\n return \"Monitor Manager working\"\r\n\r\n\r\[email protected]('/experiments', methods=['post'])\r\ndef start_experiments():\r\n experiments_plan = pd.read_csv(\"opening-black-box/monitor-manager/experiments-plan.csv\", delimiter=',')\r\n hosts_info = pd.read_csv(\"opening-black-box/monitor-manager/hosts-info.csv\", delimiter=',')\r\n\r\n for index, row in experiments_plan.iterrows():\r\n for iteration in range(row[\"iterations\"]):\r\n system_monitor(hosts_info, row['function_name'])\r\n\r\n return \"Monitoring finished\", 200\r\n\r\n\r\ndef system_monitor(hosts_info, function_name):\r\n logging.info(\"Task submitted\")\r\n submit_response = requests.post(\"http://192.168.55.20:5000/submit\", json={\"function_name\": function_name})\r\n submit_response = json.loads(submit_response.text)\r\n driver_id = submit_response[\"submissionId\"]\r\n logging.info('Submit driver ID {0} | function {1}'.format(driver_id, function_name))\r\n\r\n for index, row in hosts_info.iterrows():\r\n url = \"http://\" + str(row['host_ip']) + \":8063/monitor\"\r\n request_data = {\r\n \"container_name\": row['container_name'],\r\n \"function_name\": function_name\r\n }\r\n logging.info('Host {} : starting monitor'.format(row['host_ip']))\r\n response = requests.post(url, json=request_data)\r\n logging.info('Host {} : {} '.format(row['host_ip'], response.text))\r\n\r\n status = \"RUNNING\"\r\n while status != \"FINISHED\":\r\n status_response = requests.get(\"http://192.168.55.20:5000/status\", json={\"driver_id\": driver_id})\r\n status_response = json.loads(status_response.text)\r\n status = status_response[\"driverState\"]\r\n logging.info('Driver status {} '.format(status))\r\n time.sleep(1)\r\n\r\n logging.info(\"Experiment finished\")\r\n time.sleep(3)\r\n for index, row in hosts_info.iterrows():\r\n url = \"http://\" + str(row['host_ip']) + \":8063/monitor\"\r\n response = requests.delete(url)\r\n logging.info('Host {} : {} '.format(row['host_ip'], response.text))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(filename='opening-black-box/monitor-manager/monitor-manager.log', level=logging.INFO,\r\n format='%(asctime)s:%(message)s')\r\n app.run(debug=True, host='0.0.0.0', port=8888)\r\n"
] |
[["pandas.read_csv"]] |
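The tracked API for this record is `pandas.read_csv`. A minimal sketch of the load-and-iterate pattern from the script above, assuming a local `experiments-plan.csv` with the `iterations` and `function_name` columns it reads:

```python
import pandas as pd

# Load the experiments plan; the column names follow the record above,
# the file itself is assumed to exist locally.
experiments_plan = pd.read_csv("experiments-plan.csv", delimiter=",")

for _, row in experiments_plan.iterrows():
    # Run each experiment the requested number of times.
    for iteration in range(row["iterations"]):
        print(row["function_name"], iteration)
```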
vhzy/pythia | ["8fae4da2b7ad4d047a1a51651407ff1f91745d5f"] | ["get_npy.py"] | [
"import numpy as np\ntest=np.load('/home/ubuntu/hzy/pythia/data/m4c_textvqa_ocr_en_frcn_features/train_images/f441f29812b385ad_info.npy',encoding = \"latin1\",allow_pickle=True) #加载文件\ndoc = open('contrast9.txt', 'a') #打开一个存储文件,并依次写入\nprint(test, file=doc) #将打印内容写入文件中"
] |
[["numpy.load"]] |
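The tracked API here is `numpy.load`. A minimal sketch of the same call; `info.npy` is a placeholder for the long dataset path above:

```python
import numpy as np

# encoding="latin1" and allow_pickle=True are required to read object
# arrays that were pickled under Python 2.
test = np.load("info.npy", encoding="latin1", allow_pickle=True)

# Append the array's printed form to a text file, as the snippet above does.
with open("contrast9.txt", "a") as doc:
    print(test, file=doc)
```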
tohoaa/wetectron | ["b2e406a71df31ee2a80e101c513fccc0b21b82cd"] | ["tools/test_net.py"] | [
"# --------------------------------------------------------\n# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.\n# Nvidia Source Code License-NC\n# --------------------------------------------------------\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom wetectron.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom wetectron.config import cfg\nfrom wetectron.data import make_data_loader\nfrom wetectron.engine.inference import inference\nfrom wetectron.modeling.detector import build_detection_model\nfrom wetectron.utils.checkpoint import DetectronCheckpointer\nfrom wetectron.utils.collect_env import collect_env_info\nfrom wetectron.utils.comm import synchronize, get_rank\nfrom wetectron.utils.logger import setup_logger\nfrom wetectron.utils.miscellaneous import mkdir\n\n# Check if we can enable mixed-precision via apex.amp\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for mixed precision via apex.amp')\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--ckpt\",\n help=\"The path to the checkpoint for test, default is the latest checkpoint.\",\n default=None,\n )\n parser.add_argument(\n \"--task\",\n default=\"det\",\n type=str,\n help=\"eval task: det | corloc\",\n )\n parser.add_argument(\n \"--vis\",\n dest=\"vis\",\n help=\"Visualize the final results\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"wetectron\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n # Initialize mixed-precision if necessary\n use_mixed_precision = cfg.DTYPE == 'float16'\n amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt\n\n _ = checkpointer.load(ckpt, use_latest=args.ckpt is None)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, 
\"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n vis=args.vis,\n task=args.task,\n )\n synchronize()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[["torch.distributed.init_process_group", "torch.cuda.set_device"]] |
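The tracked calls are `torch.distributed.init_process_group` and `torch.cuda.set_device`. A minimal sketch of the multi-GPU setup used in `main()` above, assuming the script is launched with `torchrun`/`torch.distributed.launch` so `WORLD_SIZE` is set in the environment:

```python
import argparse
import os

import torch

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()

# One process per GPU: WORLD_SIZE is injected by the launcher.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

if num_gpus > 1:
    # Bind this process to its GPU before creating the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
```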
erfect2020/DecompositionForFusion | ["73e65485b84a3802ecd80b8b926108d103be1a62"] | ["models/UCSharedModelProCommon.py"] | [
"import torch.nn as nn\nimport torch\nimport torch.nn.init as init\nfrom .resnet import ResNestLayer, Bottleneck\nimport math\n\n\nclass UCSharedNetPro(nn.Module):\n def __init__(self):\n super(UCSharedNetPro, self).__init__()\n encoder_upper = [nn.Conv2d(3, 16, 3, 1, 1, bias=True),\n nn.ReLU(inplace=True),\n ResNestLayer(Bottleneck, 8, 6, stem_width=8, norm_layer=None),\n ]\n self.encoder_upper = nn.Sequential(*encoder_upper)\n self.maxpool_upper = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.upper_encoder_layer1 = ResNestLayer(Bottleneck, 16, 6, stem_width=16, norm_layer=None, is_first=False)\n self.upper_encoder_layer2 = ResNestLayer(Bottleneck, 32, 4, stem_width=32, stride=2, norm_layer=None)\n self.upper_encoder_layer3 = ResNestLayer(Bottleneck, 64, 4, stem_width=64, stride=2, norm_layer=None)\n\n self.encoder_lower = self.encoder_upper\n self.maxpool_lower = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.lower_encoder_layer1 = self.upper_encoder_layer1\n self.lower_encoder_layer2 = self.upper_encoder_layer2\n self.lower_encoder_layer3 = self.upper_encoder_layer3\n\n encoder_body_fusion = [\n ResNestLayer(Bottleneck, 256, 4, stem_width=256, norm_layer=None, is_first=False)\n ]\n self.common_encoder = nn.Sequential(*encoder_body_fusion)\n\n self.decoder_common_layer1 = ResNestLayer(Bottleneck, 64, 4, stem_width=512, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_common_up1 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_common_layer2 = ResNestLayer(Bottleneck, 16, 4, stem_width=256, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_common_up2 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_common_layer3 = ResNestLayer(Bottleneck, 4, 6, stem_width=96, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_common_up3 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n decoder_common_layer4 = [\n ResNestLayer(Bottleneck, 4, 6, stem_width=8, avg_down=False, avd=False, stride=1, norm_layer=None),\n ]\n self.decoder_common_layer4 = nn.Sequential(*decoder_common_layer4)\n decoder_projection_layer = [nn.Conv2d(16, 3, 3, 1, 1, bias=True),\n nn.ReLU(inplace=True)]\n self.decoder_common_projection_layer = nn.Sequential(*decoder_projection_layer)\n\n self.decoder_upper_layer1 = ResNestLayer(Bottleneck, 96, 4, stem_width=640, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_upper_up1 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n # 192,64 -> 128, 32\n self.decoder_upper_layer2 = ResNestLayer(Bottleneck, 32, 4, stem_width=256, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_upper_up2 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_upper_layer3 = ResNestLayer(Bottleneck, 16, 6, stem_width=96, avg_down=False, avd=False, stride=1, norm_layer=None)\n self.decoder_upper_up3 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n decoder_upper_layer4 = [\n ResNestLayer(Bottleneck, 4, 6, stem_width=32, avg_down=False, avd=False, stride=1, norm_layer=None),\n ]\n self.decoder_upper_layer4 = nn.Sequential(*decoder_upper_layer4)\n upper_decoder_projection_layer = [nn.Conv2d(16, 3, 3, 1, 1, bias=True),\n nn.ReLU(inplace=True)]\n self.decoder_upper_projection_layer = nn.Sequential(*upper_decoder_projection_layer )\n\n self.decoder_lower_layer1 = self.decoder_upper_layer1\n self.decoder_lower_up1 = nn.Upsample(scale_factor=2, 
mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_lower_layer2 = self.decoder_upper_layer2\n self.decoder_lower_up2 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_lower_layer3 = self.decoder_upper_layer3\n self.decoder_lower_up3 = nn.Upsample(scale_factor=2, mode='bilinear') # nn.PixelShuffle(2)\n self.decoder_lower_layer4 = self.decoder_upper_layer4\n self.decoder_lower_projection_layer = self.decoder_upper_projection_layer\n\n self.fusion_rule = nn.Sequential(*[\n nn.Conv2d(16, 3, 3, 1, 1, bias=True),\n nn.ReLU(inplace=True)\n ])\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.InstanceNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, img1, img2):\n\n feature_upper = self.encoder_upper(img1)\n feature_upper0 = self.maxpool_upper(feature_upper)\n feature_upper1 = self.upper_encoder_layer1(feature_upper0)\n feature_upper2 = self.upper_encoder_layer2(feature_upper1)\n feature_upper3 = self.upper_encoder_layer3(feature_upper2)\n\n feature_lower = self.encoder_lower(img2)\n feature_lower0 = self.maxpool_lower(feature_lower)\n feature_lower1 = self.lower_encoder_layer1(feature_lower0)\n feature_lower2 = self.lower_encoder_layer2(feature_lower1)\n feature_lower3 = self.lower_encoder_layer3(feature_lower2)\n\n feature_concat = torch.cat((feature_upper3, feature_lower3), dim=1)\n feature_common = self.common_encoder(feature_concat)\n\n common_part = self.decoder_common_layer1(feature_common)\n common_part = self.decoder_common_up1(common_part)\n common_part = torch.cat((common_part, feature_upper2, feature_lower2), dim=1)\n common_part = self.decoder_common_layer2(common_part)\n common_part = self.decoder_common_up2(common_part)\n common_part = torch.cat((common_part, feature_upper1, feature_lower1), dim=1)\n common_part = self.decoder_common_layer3(common_part)\n common_part = self.decoder_common_up3(common_part)\n common_part = self.decoder_common_layer4(common_part)\n common_part_embedding = common_part\n common_part = self.decoder_common_projection_layer(common_part)\n\n feature_de_upper = torch.cat((feature_common, feature_upper3), dim=1)\n upper_part = self.decoder_upper_layer1(feature_de_upper)\n upper_part = self.decoder_upper_up1(upper_part)\n upper_part = torch.cat((upper_part, feature_upper2), dim=1)\n upper_part = self.decoder_upper_layer2(upper_part)\n upper_part = self.decoder_upper_up2(upper_part)\n upper_part = torch.cat((upper_part, feature_upper1), dim=1)\n upper_part = self.decoder_upper_layer3(upper_part)\n upper_part = self.decoder_upper_up3(upper_part)\n upper_part = self.decoder_upper_layer4(upper_part)\n upper_part_embeding = upper_part\n upper_part = self.decoder_upper_projection_layer(upper_part)\n\n feature_de_lower = torch.cat((feature_common, feature_lower3), dim=1)\n lower_part = self.decoder_lower_layer1(feature_de_lower)\n lower_part = self.decoder_lower_up1(lower_part)\n lower_part = torch.cat((lower_part, feature_lower2), dim=1)\n lower_part = self.decoder_lower_layer2(lower_part)\n lower_part = self.decoder_lower_up2(lower_part)\n lower_part = torch.cat((lower_part, feature_lower1), dim=1)\n lower_part = self.decoder_lower_layer3(lower_part)\n lower_part = self.decoder_lower_up3(lower_part)\n lower_part = self.decoder_lower_layer4(lower_part)\n lower_part_embeddding = lower_part\n lower_part = self.decoder_lower_projection_layer(lower_part)\n\n fusion_part = 
self.fusion_rule(upper_part_embeding+ lower_part_embeddding+ common_part_embedding)\n\n return common_part, upper_part, lower_part, fusion_part\n"
] |
[["torch.nn.Sequential", "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.ReLU"]] |
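Several `torch.nn` building blocks are tracked for this model. A minimal sketch of its recurring pattern, with illustrative channel counts: an `nn.Sequential` encoder stem, max-pooled features, bilinear upsampling, and a `torch.cat` skip connection along the channel axis:

```python
import torch
import torch.nn as nn

# Tiny stand-in for the encoder branch (channel counts are illustrative).
encoder = nn.Sequential(nn.Conv2d(3, 16, 3, 1, 1, bias=True), nn.ReLU(inplace=True))
maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
up = nn.Upsample(scale_factor=2, mode="bilinear")

x = torch.randn(1, 3, 64, 64)
feat = encoder(x)                        # (1, 16, 64, 64)
coarse = up(maxpool(feat))               # downsample, then back to 64x64
skip = torch.cat((feat, coarse), dim=1)  # concatenate channels -> (1, 32, 64, 64)
```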
hckynumber9/nn-holdem | ["ef49a91369733eb1453c5db71c77f644ca91fa23"] | ["holdem/playercontrol.py"] | [
"import numpy as np\nimport uuid\nfrom threading import Thread\n\nimport xmlrpc.client\nfrom xmlrpc.server import SimpleXMLRPCServer\n\nfrom .holdemai import HoldemAI\nfrom deuces.deuces import Card\n\n# xmlrpc.client.Marshaller.dispatch[long] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n# xmlrpc.client.Marshaller.dispatch[type(0)] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nclass PlayerControl(object):\n def __init__(self, host, port, playerID, ai_flag = False, ai_type = -1, name = 'Alice', stack = 2000):\n self.server = xmlrpc.client.ServerProxy('http://0.0.0.0:8000')\n self.daemon = True\n\n self._ai_flag = ai_flag\n self.playerID = playerID\n\n if self._ai_flag:\n self._ai_type = ai_type\n if self._ai_type == 0:\n self.ai = HoldemAI(uuid.uuid4())\n # print(self.ai.networkID)\n self._name = name\n self.host = host\n self.port = port\n self._stack = stack\n self._hand = []\n self.add_player()\n\n def get_ai_id(self):\n if self._ai_type == 0:\n return str(self.ai.networkID)\n else:\n return self._ai_type\n\n def save_ai_state(self):\n if self._ai_flag and self._ai_type == 0:\n print('AI type NEURAL NETWORK won (', self.get_ai_id(), ')')\n # self.writer.write([self.ai.networkID, consec_wins])\n self.ai.save()\n else:\n print('AI type ', self._ai_type, 'won')\n\n def delete_ai(self):\n if self._ai_type == 0:\n self.ai.delete()\n\n def new_ai(self, ai_id):\n if ai_id == 'unchanged':\n pass\n else:\n self.ai = HoldemAI(ai_id) # defaults to random network if ai_id not recognized\n\n def add_player(self):\n # print('Player', self.playerID, 'joining game')\n self.server.add_player(self.host, self.port, self.playerID, self._name, self._stack)\n\n def remove_player(self):\n self.server.remove_player(self.playerID)\n\n def rejoin(self):\n self.remove_player()\n self.reset_stack()\n self.add_player()\n\n def rejoin_new(self, ai_id):\n self.new_ai(ai_id)\n self.rejoin()\n\n def new_ai_type(self, ai_type):\n self._ai_type = ai_type\n\n def reset_stack(self):\n self._stack = 2000\n\n def print_table(self, table_state):\n print('Stacks:')\n players = table_state.get('players', None)\n for player in players:\n print(player[4], ': ', player[1], end='')\n if player[2] == True:\n print('(P)', end='')\n if player[3] == True:\n print('(Bet)', end='')\n if player[0] == table_state.get('button'):\n print('(Button)', end='')\n if players.index(player) == table_state.get('my_seat'):\n print('(me)', end='')\n print('')\n\n print('Community cards: ', end='')\n Card.print_pretty_cards(table_state.get('community', None))\n print('Pot size: ', table_state.get('pot', None))\n\n print('Pocket cards: ', end='')\n Card.print_pretty_cards(table_state.get('pocket_cards', None))\n print('To call: ', table_state.get('tocall', None))\n\n def update_localstate(self, table_state):\n self._stack = table_state.get('stack')\n self._hand = table_state.get('pocket')\n\n # cleanup\n def player_move(self, table_state):\n self.update_localstate(table_state)\n bigblind = table_state.get('bigblind')\n tocall = min(table_state.get('tocall', None),self._stack)\n minraise = table_state.get('minraise', None)\n # print('minraise ', minraise)\n # move_tuple = ('Exception!',-1)\n\n # ask this human meatbag what their move is\n if not self._ai_flag:\n self.print_table(table_state)\n if tocall == 0:\n print('1) Raise')\n print('2) Check')\n try:\n choice = int(input('Choose your option: '))\n except:\n choice = 0\n if choice == 1:\n choice2 = int(input('How much would you like to raise to? 
(min = {}, max = {})'.format(minraise,self._stack)))\n while choice2 < minraise:\n choice2 = int(input('(Invalid input) How much would you like to raise? (min = {}, max = {})'.format(minraise,self._stack)))\n move_tuple = ('raise',choice2)\n elif choice == 2:\n move_tuple = ('check', 0)\n else:\n move_tuple = ('check', 0)\n else:\n print('1) Raise')\n print('2) Call')\n print('3) Fold')\n try:\n choice = int(input('Choose your option: '))\n except:\n choice = 0\n if choice == 1:\n choice2 = int(input('How much would you like to raise to? (min = {}, max = {})'.format(minraise,self._stack)))\n while choice2 < minraise:\n choice2 = int(input('(Invalid input) How much would you like to raise to? (min = {}, max = {})'.format(minraise,self._stack)))\n move_tuple = ('raise',choice2)\n elif choice == 2:\n move_tuple = ('call', tocall)\n elif choice == 3:\n move_tuple = ('fold', -1)\n else:\n move_tuple = ('call', tocall)\n\n # feed table state to ai and get a response\n else:\n # neural network output\n if self._ai_type == 0:\n # neural network output\n move_tuple = self.ai.act(table_state)\n\n elif self._ai_type == 1:\n # check/fold bot\n if tocall > 0:\n move_tuple = ('fold',-1)\n else:\n move_tuple = ('check', 0)\n elif self._ai_type == 2:\n # check/call bot\n if tocall > 0:\n move_tuple = ('call',tocall)\n else:\n move_tuple = ('check', 0)\n else:\n if tocall >0:\n # 0 - Raise\n # 1 - Call\n # 2 - Fold\n move_idx = np.random.randint(0,2)\n if move_idx == 0:\n try:\n bet_size = np.random.randint(minraise, self._stack)\n bet_size -= bet_size % bigblind\n except:\n bet_size = self._stack\n if bet_size <= tocall:\n move_tuple = ('call', tocall)\n else:\n move_tuple = ('raise', bet_size)\n elif move_idx == 1:\n move_tuple = ('call', tocall)\n else:\n move_tuple = ('fold', -1)\n else:\n # 0 - Raise\n # 1 - Check\n move_idx = np.random.randint(0,1)\n if move_idx == 0:\n try:\n bet_size = np.random.randint(minraise, self._stack)\n bet_size -= bet_size % bigblind\n except:\n bet_size = self._stack\n move_tuple = ('raise', bet_size)\n else:\n move_tuple = ('check',0)\n\n return move_tuple\n\nclass PlayerControlProxy(object):\n def __init__(self,player):\n self._quit = False\n\n self._player = player\n self.server = SimpleXMLRPCServer((self._player.host, self._player.port), logRequests=False, allow_none=True)\n self.server.register_instance(self, allow_dotted_names=True)\n Thread(target = self.run).start()\n\n def run(self):\n while not self._quit:\n self.server.handle_request()\n\n def player_move(self, output_spec):\n return self._player.player_move(output_spec)\n\n def print_table(self, table_state):\n self._player.print_table(table_state)\n\n def join(self):\n self._player.add_player()\n\n def rejoin_new(self, ai_id = 'unchanged'):\n self._player.rejoin_new(ai_id)\n\n def rejoin(self, ai_type = 0):\n self._player.rejoin()\n\n def get_ai_id(self):\n return self._player.get_ai_id()\n\n def save_ai_state(self):\n self._player.save_ai_state()\n\n def delete_ai(self):\n self._player.delete_ai()\n\n def quit(self):\n self._player.server.remove_player(self._player.playerID)\n self._quit = True\n"
] |
[["numpy.random.randint"]] |
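Only `numpy.random.randint` is tracked here, used by the random bot for move selection and bet sizing. A minimal sketch with illustrative stakes; note that `np.random.randint(low, high)` excludes `high`, so the source's `randint(0, 2)` only ever returns 0 or 1 (its fold branch is unreachable) and `randint(0, 1)` always returns 0:

```python
import numpy as np

minraise, stack, bigblind, tocall = 40, 2000, 20, 20  # illustrative stakes

move_idx = np.random.randint(0, 2)  # samples from {0, 1}
if move_idx == 0:
    # Random raise, rounded down to a multiple of the big blind.
    bet_size = np.random.randint(minraise, stack)
    bet_size -= bet_size % bigblind
    move = ("raise", bet_size) if bet_size > tocall else ("call", tocall)
else:
    move = ("call", tocall)
print(move)
```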
avagait/gaitcopy | ["2fee760156b289ef12f19fb366fb62cf535c305e"] | ["nets/mj_gaitcopy_model.py"] | [
"# Copies signatures\n# (c) MJMJ/2021\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport tensorflow_addons as tfa\nimport deepdish as dd\nimport os\nimport numpy as np\nfrom nets.triplet_loss_all import TripletBatchAllLoss\n\n# ===============================================================\n# Version 1 of L1-smooth\nHUBER_DELTA = 0.5\n\n\ndef mj_smoothL1(y_true, y_pred):\n x = K.abs(y_true - y_pred)\n x = K.switch(x < HUBER_DELTA, 0.5 * x ** 2, HUBER_DELTA * (x - 0.5 * HUBER_DELTA))\n return K.sum(x)\n\n\ndef mj_smoothL1ex(h_delta):\n\tdef mj_smoothL1_(y_true, y_pred):\n\t\tx = K.abs(y_true - y_pred)\n\t\tx = K.switch(x < h_delta, 0.5 * x ** 2, h_delta * (x - 0.5 * h_delta))\n\t\t# return K.mean(x, axis=-1)\n\t\treturn K.sum(x)\n\treturn mj_smoothL1_\n\n\nclass GaitCopyModel():\n\tdef __init__(self, experdir):\n\t\tself.model = None\n\t\tself.model_encode = None\n\t\tself.hist = None\n\t\tself.experdir = experdir\n\n\tdef load(self, netpath, encode_layer=None, compile=True, gaitset=False):\n\t\ttry:\n\t\t\tif gaitset:\n\t\t\t\tself.model = tf.keras.models.load_model(netpath, compile=compile, custom_objects={\"MatMul\": MatMul(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'tf': tf,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"TripletBatchAllLoss\": TripletBatchAllLoss()})\n\t\t\t\tencode_layer = \"flatten\"\n\t\t\telse:\n\t\t\t\tself.model = tf.keras.models.load_model(netpath, compile=compile, custom_objects={'mj_smoothL1': mj_smoothL1})\n\t\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++\")\n\t\t\t\tprint(\"Model loaded from: \" + netpath)\n\t\texcept:\n\t\t\t# Load config file and build model.\n\t\t\tbdir = os.path.dirname(netpath)\n\t\t\tfconfig = os.path.join(bdir, \"model-config.hdf5\")\n\t\t\tnetconfig = dd.io.load(fconfig)\n\t\t\tself.model = self.build_by_config(netconfig)\n\n\t\t\t# Load weights file.\n\t\t\tbname = os.path.basename(netpath)\n\t\t\tfparts = os.path.splitext(bname)\n\t\t\tfilewes = os.path.join(bdir, fparts[0] + \"_weights.hdf5\")\n\t\t\tself.model.load_weights(filewes, by_name=True)\n\n\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++\")\n\t\t\tprint(\"Model loaded from config + weight files: \" + fconfig + ' ' + filewes)\n\n\t\tif encode_layer is None:\n\t\t\tself.model_encode = tf.keras.Model(self.model.input, self.model.layers[-1].input)\n\t\telse:\n\t\t\tout_layer = self.model.get_layer(encode_layer).output\n\t\t\tself.model_encode = tf.keras.Model(self.model.input, out_layer)\n\n\tdef build_or_load(self, input_shape, number_convolutional_layers, filters_size, filters_numbers, strides,\n\t ndense_units=2048, weight_decay=0.0001, dropout=0.4, optimizer=tf.keras.optimizers.SGD(0.01, 0.9),\n\t nclasses=0, initnet=\"\", freeze_convs=False, use3D=False, freeze_all=False, model_version='iwann', lstm=-1,\n\t\t\t\t\t lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, loss_mode='both', margin=0.25, loss_weights=[1.0, 0.1],\n\t\t\t\t\t with_l2=False, kinit='glorot_uniform', drop_code=0, mobpars=None, hdelta=0.5):\n\n\t\tif initnet == \"\":\n\t\t\tbuild = self.build(input_shape, number_convolutional_layers, filters_size, filters_numbers, strides,\n\t\t\t\t\t\t\t ndense_units, weight_decay, dropout, optimizer, nclasses, use3D=use3D,\n\t\t\t\t\t\t\t model_version=model_version, lstm=lstm, lstm_number=lstm_number,\n\t\t\t\t\t\t\t dropout_lstm=dropout_lstm, L2_norm_lstm=L2_norm_lstm, loss_mode=loss_mode, margin=margin,\n\t\t\t\t\t\t\t loss_weights=loss_weights, with_l2=with_l2, kinit=kinit, 
drop_code=drop_code,\n\t\t\t\t\t\t\t mobpars=mobpars)\n\t\telse:\n\t\t\tself.load(initnet)\n\n\t\t\t# Check if freeze some weights\n\t\t\tif freeze_convs or freeze_all:\n\t\t\t\tseq1 = self.model.get_layer(\"convBranch\")\n\n\t\t\t\tfor layer in seq1.layers:\n\t\t\t\t\tif freeze_all or type(layer) == tf.keras.layers.Conv2D or type(layer) == tf.keras.layers.Conv3D:\n\t\t\t\t\t\tlayer.trainable = False\n\n\t\t\t\tfor layer in self.model.layers:\n\t\t\t\t\tif freeze_all or type(layer) == tf.keras.layers.Dense:\n\t\t\t\t\t\tlayer.trainable = False\n\n\t\t\t# Check if exists FC for classification:\n\t\t\treplace_fc = False\n\t\t\tfor layer in self.model.layers:\n\t\t\t\tif layer.name == \"probs\":\n\t\t\t\t\t# Check number of classes.\n\t\t\t\t\tif layer.units != nclasses:\n\t\t\t\t\t\tprint(\"Replacing FC layer for the new classes...\")\n\t\t\t\t\t\treplace_fc = True\n\n\t\t\tif replace_fc:\n\t\t\t\tmain_branch = self.model.layers[-1].input\n\t\t\t\tmain_branch = tf.keras.layers.Dense(nclasses, activation='softmax', kernel_initializer='he_uniform',\n\t\t\t\t name=\"probs\")(main_branch)\n\t\t\t\tself.model = tf.keras.Model(inputs=self.model.input, outputs=main_branch)\n\t\t\t\tself.model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy',\n\t\t\t\t metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])\n\n\t\t\tprint(\"Alright\")\n\n\tdef build_by_config(self, netconfig):\n\t\tfilters_size = netconfig[\"filters_size\"]\n\t\tfilters_numbers = netconfig[\"filters_numbers\"]\n\t\tstrides = netconfig[\"strides\"]\n\t\tinput_shape = netconfig[\"input_shape\"]\n\t\tndense_units = netconfig[\"ndense_units\"]\n\t\tweight_decay = netconfig[\"weight_decay\"]\n\t\tdropout = netconfig[\"dropout\"]\n\t\tif \"nclasses\" in netconfig.keys():\n\t\t\tnclasses = netconfig[\"nclasses\"]\n\t\telse:\n\t\t\tnclasses = 150\n\t\toptimizer = netconfig[\"optimizer\"]\n\n\t\tif \"use3D\" in netconfig.keys():\n\t\t\tuse3D = netconfig[\"use3D\"]\n\t\telse:\n\t\t\tuse3D = False\n\n\t\tif \"drop_code\" in netconfig.keys():\n\t\t\tdrop_code = netconfig[\"drop_code\"]\n\t\telse:\n\t\t\tdrop_code = 0\n\n\t\tmodel_version = netconfig[\"model_version\"]\n\t\tlstm = netconfig[\"lstm\"]\n\t\tlstm_number = netconfig[\"lstm_number\"]\n\t\tdropout_lstm = netconfig[\"dropout_lstm\"]\n\t\tL2_norm_lstm = netconfig[\"L2_norm_lstm\"]\n\t\tif \"l2\" in netconfig.keys():\n\t\t\twith_l2 = netconfig[\"l2\"]\n\t\telse:\n\t\t\twith_l2 = False\n\n\t\tself.model = self.build(input_shape, len(filters_numbers), filters_size, filters_numbers, strides, ndense_units,\n\t\t weight_decay, dropout, nclasses=nclasses, optimizer=optimizer, use3D=use3D, model_version=model_version,\n\t\t lstm=lstm, lstm_number=lstm_number, dropout_lstm=dropout_lstm, L2_norm_lstm=L2_norm_lstm,\n\t\t\t\t\t\t\t\twith_l2=with_l2, drop_code=drop_code)\n\n\tdef build(self, input_shape, number_convolutional_layers, filters_size, filters_numbers, strides, ndense_units=512,\n\t weight_decay=0.0005, dropout=0.4, optimizer=tf.keras.optimizers.SGD(0.01, 0.9), nclasses=0, use3D=False,\n\t\t\t model_version='iwann', lstm=-1,\n\t\t\t lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, loss_mode='MSE', margin=0.25, loss_weights=[1.0, 0.1],\n\t\t\t with_l2=False, kinit='glorot_uniform', drop_code=0, mobpars=None, hdelta=0.5, cross=False):\n\t\t\"\"\"\n\t\t :param input_shapes: tuple ((50,60,60), (25,60,60))\n\t\t :param number_convolutional_layers:\n\t\t :param filters_size:\n\t\t :param filters_numbers:\n\t\t :param ndense_units:\n\t\t :param weight_decay:\n\t\t :param 
dropout:\n\t\t :param optimizer:\n\t\t :param margin:\n\t\t :return:\n\t\t \"\"\"\n\n\t\tif number_convolutional_layers < 1:\n\t\t\tprint(\"ERROR: Number of convolutional layers must be greater than 0\")\n\n\t\toutputs = []\n\t\tlosses = []\n\t\tmetrics = []\n\n\t\tif lstm == 0:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tlstm_layer = tf.keras.layers.ConvLSTM2D(16, 3, padding='same', data_format='channels_first')(input)\n\t\t\tinput_shape = (16, input_shape[1], input_shape[2])\n\t\telif lstm == 1 or lstm == 2:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tinput_shape = (None, input_shape[0], input_shape[1], input_shape[2])\n\t\telif lstm == 5:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tlstm_layer = tf.keras.layers.ConvLSTM2D(50, 3, padding='same', data_format='channels_first')(input)\n\t\t\tinput_shape = (50, input_shape[1], input_shape[2])\n\t\telif lstm == 6:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tlstm_layer = tf.keras.layers.ConvLSTM2D(16, 7, padding='same', data_format='channels_first')(input)\n\t\t\tinput_shape = (16, input_shape[1], input_shape[2])\n\t\telif lstm == 7:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tlstm_layer = tf.keras.layers.ConvLSTM2D(50, 7, padding='same', data_format='channels_first')(input)\n\t\t\tinput_shape = (50, input_shape[1], input_shape[2])\n\t\telif lstm >= 8:\n\t\t\tinput = tf.keras.layers.Input(shape=(None, input_shape[0], input_shape[1], input_shape[2]), name='input')\n\t\t\tlstm_layer = tf.keras.layers.ConvLSTM2D(50, 3, padding='same', data_format='channels_first')(input)\n\t\t\tinput_shape = [(50, input_shape[1], input_shape[2]), (None, input_shape[0], input_shape[1], input_shape[2])]\n\t\telse:\n\t\t\tinput = tf.keras.layers.Input(shape=input_shape, name='input')\n\t\tif use3D:\n\t\t\tif model_version == 'bmvc' or model_version=='smallA' or model_version=='smallB' :\n\t\t\t\tconvBranch = self.build_3Dbranch_Manuel(\"convBranch\", input_shape, ndense_units, \n\t\t\t\t\t dropout=dropout, kernel_initializer=kinit, filters_size=filters_size,\n\t\t\t\t\t filters_numbers=filters_numbers)\n\t\t\telif model_version == \"gaitset\" or model_version == \"gaitset_cross\":\n\t\t\t\tconvBranch = self.build_gaitset_branch(input_layer=input, input_shape=input_shape)\n\t\t\telse:\n\t\t\t\tconvBranch = self.build_3Dbranch(\"convBranch\", input_shape, number_convolutional_layers, filters_size, strides,\n\t\t\t\t filters_numbers, weight_decay, ndense_units, dropout)\n\t\telse:\n\t\t\tif model_version == 'bmvc':\n\t\t\t\tif lstm >= 8:\n\t\t\t\t\tlstm_branch1 = 5\n\t\t\t\t\tlstm_branch2 = 2\n\t\t\t\t\tconvBranch1 = self.build_branch_Manuel(\"convBranch\", input_shape[0], number_convolutional_layers,\n\t\t\t\t\t filters_size, strides, filters_numbers, weight_decay, ndense_units,\n\t\t\t\t\t dropout, lstm_branch1, lstm_number, dropout_lstm, L2_norm_lstm, False)\n\t\t\t\t\tconvBranch2 = self.build_branch_Manuel(\"convBranch2\", input_shape[1], number_convolutional_layers,\n\t\t\t\t\t filters_size, strides, filters_numbers, weight_decay, ndense_units,\n\t\t\t\t\t dropout, lstm_branch2, lstm_number, dropout_lstm, L2_norm_lstm, False)\n\t\t\t\t\tconvBranch = [convBranch1, 
convBranch2]\n\t\t\t\telse:\n\t\t\t\t\tconvBranch = self.build_branch_Manuel(\"convBranch\", input_shape, number_convolutional_layers, filters_size,\n\t\t\t\t strides, filters_numbers, weight_decay, ndense_units, dropout, lstm, lstm_number, dropout_lstm, L2_norm_lstm)\n\t\t\telif model_version == 'bmvcfc':\n\t\t\t\tconvBranch = self.build_branch_fc(\"convBranch\", input_shape, number_convolutional_layers,\n\t\t\t\t\t\t\t\t\t\t\t\t\t filters_size, strides, filters_numbers, weight_decay,\n\t\t\t\t\t\t\t\t\t\t\t\t ndense_units, dropout)\n\t\t\t\tconvBranch.summary()\n\t\t\telse:\n\t\t\t\tif lstm >= 8:\n\t\t\t\t\tlstm_branch1 = 5\n\t\t\t\t\tlstm_branch2 = 2\n\t\t\t\t\tconvBranch1 = self.build_branch(\"convBranch\", input_shape[0], number_convolutional_layers,\n\t\t\t\t\t filters_size, strides, filters_numbers, weight_decay, ndense_units,\n\t\t\t\t\t dropout, lstm_branch1, lstm_number, dropout_lstm, L2_norm_lstm, False)\n\t\t\t\t\tconvBranch2 = self.build_branch(\"convBranch2\", input_shape[1], number_convolutional_layers,\n\t\t\t\t\t filters_size, strides, filters_numbers, weight_decay, ndense_units,\n\t\t\t\t\t dropout, lstm_branch2, lstm_number, dropout_lstm, L2_norm_lstm, False)\n\t\t\t\t\tconvBranch = [convBranch1, convBranch2]\n\t\t\t\telse:\n\t\t\t\t\tconvBranch = self.build_branch(\"convBranch\", input_shape, number_convolutional_layers, filters_size, strides,\n\t\t\t\t filters_numbers, weight_decay, ndense_units, dropout, lstm, lstm_number,\n\t\t\t\t\t\t\t\t\t\t\t\t dropout_lstm, L2_norm_lstm, final_pool=False)\n\n\t\tif lstm == 0 or lstm == 5 or lstm == 6 or lstm == 7:\n\t\t\toutput = convBranch(lstm_layer)\n\t\telif lstm >= 8 and lstm <= 15:\n\t\t\toutput1 = convBranch[0](lstm_layer)\n\t\t\toutput2 = convBranch[1](input)\n\n\t\t\tif lstm == 10 or lstm == 15:\n\t\t\t\t# Add max layer\n\t\t\t\toutput1 = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name=\"embedL2_1\")(output1)\n\t\t\t\toutput2 = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name=\"embedL2_2\")(output2)\n\n\t\t\t# Add concat layer\n\t\t\tmain_branch = tf.keras.layers.Concatenate(axis=1)([output1, output2])\n\n\t\t\tif lstm == 11 or lstm == 12 or lstm == 15:\n\t\t\t\tmain_branch = tf.keras.layers.Dropout(dropout, name=\"drop_concat\")(main_branch)\n\n\t\t\tif lstm != 9 and lstm != 12 and lstm != 15:\n\t\t\t\t# Add dense layer + dropout\n\t\t\t\tmain_branch = tf.keras.layers.Dense(ndense_units * 2, name=\"dense\")(main_branch)\n\t\t\t\tif dropout > 0:\n\t\t\t\t\tmain_branch = tf.keras.layers.Dropout(dropout, name=\"drop\")(main_branch)\n\t\t\toutput = tf.keras.layers.Dense(ndense_units, name=\"code\")(main_branch)\n\n\t\telse:\n\t\t\tif model_version != \"gaitset\" and model_version != \"gaitset_cross\":\n\t\t\t\toutput = convBranch(input)\n\t\t\telse:\n\t\t\t\toutput = convBranch\n\n\t\tif drop_code > 0:\n\t\t\toutput = tf.keras.layers.Dropout(drop_code, name=\"drop_code\")(output)\n\n\t\toutputs.append(output)\n\n\t\tif model_version == \"gaitset_cross\":\n\t\t\toutputs.append(output)\n\t\t\toutput = tf.keras.layers.Dense(nclasses)(output)\n\t\t\toutputs.append(output)\n\n\n\t\t# main_branch = tf.keras.layers.Dense(1024, activation='linear', kernel_initializer='he_uniform',\n\t\t# \t\t\t\t\t\t\t\t\tname=\"signature\")(output)\n\t\tif with_l2:\n\t\t\tl2norm_ = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1), name=\"signature\")(output)\n\t\t\toutputs.append(l2norm_)\n\n\t\tif loss_mode == \"MSE\":\n\t\t\tlosses.append('mean_squared_error')\n\t\telif loss_mode == 
\"sL1\":\n\t\t\tlosses.append(mj_smoothL1)\n\t\telif loss_mode == \"sL1e\" or loss_mode == \"sL1x\":\n\t\t\tlosses.append(mj_smoothL1ex(hdelta))\n\t\telif loss_mode == \"Huber\":\n\t\t\tlosses.append(tf.keras.losses.Huber(delta=hdelta))\n\t\telif loss_mode == \"sL1tri\":\n\t\t\tlosses = [mj_smoothL1, tfa.losses.TripletSemiHardLoss(margin=margin)]\n\t\telif loss_mode == \"sL1triH\":\n\t\t\tlosses = [mj_smoothL1, tfa.losses.TripletHardLoss(margin=margin)]\n\t\telif loss_mode == \"sL1triB\":\n\t\t\tlosses = [mj_smoothL1, TripletBatchAllLoss(margin=margin)]\n\t\telse:\n\t\t\tprint(\"ERROR: invalid loss mode - \"+loss_mode)\n\n\n\t\tmetrics.append(tf.keras.metrics.MeanAbsoluteError())\n\t\tmetrics.append(tf.keras.metrics.MeanSquaredError())\n\n\t\tif model_version == \"gaitset_cross\":\n\t\t\tlosses.append('sparse_categorical_crossentropy')\n\t\t\tmetrics.append('accuracy')\n\t\t\tloss_weights.append(0.2)\n\n\t\tself.model = tf.keras.Model(inputs=input, outputs=outputs, name=\"copynet\")\n\t\t#self.model.compile(optimizer=optimizer, loss=losses, loss_weights=loss_weights, metrics=metrics)\n\n\t\t# Save useful info for recovering the model with different Python versions\n\t\tmodelpars = {'filters_size': filters_size,\n\t\t 'filters_numbers': filters_numbers,\n\t\t 'input_shape': input_shape,\n\t\t 'ndense_units': ndense_units,\n\t\t 'weight_decay': weight_decay,\n\t\t 'dropout': dropout,\n\t\t 'optimizer': optimizer,\n\t\t 'custom': 'TripletSemiHardLoss',\n\t\t 'nclasses': nclasses,\n\t\t 'use3D': use3D,\n\t\t 'model_version': model_version,\n\t\t 'loss_mode': loss_mode,\n\t\t 'loss_weights': loss_weights,\n\t\t 'margin': margin,\n\t\t\t\t\t 'l2': with_l2}\n\t\t#dd.io.save(os.path.join(self.experdir, \"model-config.hdf5\"), modelpars)\n\n\tdef build_branch(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None,\n\t strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=4096, dropout=0.4, lstm=-1,\n\t\t\t\t\t lstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, add_dense=True, final_pool=True):\n\t\tif filters_numbers is None:\n\t\t\tfilters_numbers = [96, 192, 512, 4096]\n\t\tL2_norm = tf.keras.regularizers.l2(weight_decay)\n\t\tif L2_norm_lstm is not None:\n\t\t\tL2_norm_lstm = tf.keras.regularizers.l2(L2_norm_lstm)\n\n\t\tconvBranch = tf.keras.Sequential(name=name)\n\t\tif lstm == 2:\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(\n\t\t\t\ttf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t\t kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape,\n\t\t\t\t data_format='channels_first')))\n\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')))\n\n\t\t\tfor i in range(1, number_convolutional_layers):\n\t\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(\n\t\t\t\t\ttf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]),\n\t\t\t\t\t kernel_regularizer=L2_norm, activation='relu',\n\t\t\t\t\t data_format='channels_first')))\n\n\t\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')))\n\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten(name=\"flatten\")))\n\t\t\tconvBranch.add(tf.keras.layers.LSTM(lstm_number, dropout=dropout_lstm, kernel_regularizer=L2_norm_lstm))\n\n\t\t\tif add_dense:\n\t\t\t\t# Add dense layer + 
dropout\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units, name=\"dense\"))\n\t\t\t\tif dropout > 0:\n\t\t\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\t\telse:\n\t\t\tif lstm == 1:\n\t\t\t\tconvBranch.add(tf.keras.layers.ConvLSTM2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t\t\t kernel_regularizer=L2_norm, input_shape=input_shape,\n\t\t\t\t\t data_format='channels_first'))\n\t\t\telse:\n\t\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape,\n\t\t\t data_format='channels_first'))\n\n\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\t\tfor i in range(1, number_convolutional_layers):\n\t\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]),\n\t\t\t\t kernel_regularizer=L2_norm, activation='relu',\n\t\t\t\t data_format='channels_first'))\n\n\t\t\t\t# Needed for IWANN model:\n\t\t\t\tif (i < (number_convolutional_layers - 1)) or (i == (number_convolutional_layers - 1) and final_pool):\n\t\t\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\t\tconvBranch.add(tf.keras.layers.Flatten(name=\"flatten\"))\n\n\t\t\tif add_dense:\n\t\t\t\t# Add dense layer + dropout\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units, name=\"dense\"))\n\t\t\t\tif dropout > 0:\n\t\t\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\n\t\treturn convBranch\n\n\tdef build_branch_Manuel(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None,\n\t strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=1024, dropout=0.4, lstm=-1,\n\t\t\t\t\t\t\tlstm_number=512, dropout_lstm=0.0, L2_norm_lstm=None, add_dense=True):\n\t\tif filters_numbers is None:\n\t\t\tfilters_numbers = [96, 192, 512, 512]\n\t\tL2_norm = tf.keras.regularizers.l2(weight_decay)\n\t\tif L2_norm_lstm is not None:\n\t\t\tL2_norm_lstm = tf.keras.regularizers.l2(L2_norm_lstm)\n\n\t\tconvBranch = tf.keras.Sequential(name=name)\n\n\t\tif lstm == 2:\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(\n\t\t\t\ttf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t\t kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape,\n\t\t\t\t data_format='channels_first')))\n\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')))\n\n\t\t\tfor i in range(1, number_convolutional_layers):\n\t\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(\n\t\t\t\t\ttf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]),\n\t\t\t\t\t kernel_regularizer=L2_norm, activation='relu',\n\t\t\t\t\t data_format='channels_first')))\n\n\t\t\t\t#if i != number_convolutional_layers - 1:\n\t\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')))\n\n\t\t\tconvBranch.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten(name=\"flatten\")))\n\t\t\tconvBranch.add(tf.keras.layers.LSTM(lstm_number, dropout=dropout_lstm, kernel_regularizer=L2_norm_lstm))\n\n\t\t\tif add_dense:\n\t\t\t\t# Add dense layer + dropout\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units * 2, 
name=\"dense\"))\n\t\t\t\tif dropout > 0:\n\t\t\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units, name=\"code\"))\n\t\telse:\n\t\t\tif lstm == 1:\n\t\t\t\tconvBranch.add(tf.keras.layers.ConvLSTM2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t\t\t kernel_regularizer=L2_norm, input_shape=input_shape,\n\t\t\t\t\t data_format='channels_first'))\n\t\t\telse:\n\t\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape,\n\t\t\t data_format='channels_first'))\n\n\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\t\tfor i in range(1, number_convolutional_layers):\n\t\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]),\n\t\t\t\t kernel_regularizer=L2_norm, activation='relu',\n\t\t\t\t data_format='channels_first'))\n\n\t\t\t\t#if i != number_convolutional_layers - 1:\n\t\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\t\tconvBranch.add(tf.keras.layers.Flatten(name=\"flatten\"))\n\n\t\t\tif add_dense:\n\t\t\t\t# Add dense layer + dropout\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units * 2, name=\"dense\"))\n\t\t\t\tif dropout > 0:\n\t\t\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\t\t\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units, name=\"code\"))\n\n\t\treturn convBranch\n\n\t# Fully-convolutional branch\n\tdef build_branch_fc(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None,\n\t strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=1024, dropout=0.4):\n\t\tif filters_numbers is None:\n\t\t\tfilters_numbers = [96, 192, 512, 512]\n\t\tL2_norm = tf.keras.regularizers.l2(weight_decay)\n\n\t\tconvBranch = tf.keras.Sequential(name=name)\n\n\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[0], kernel_size=(filters_size[0]), strides=(strides[0]),\n\t\t\t kernel_regularizer=L2_norm, activation='relu', input_shape=input_shape,\n\t\t\t data_format='channels_first'))\n\n\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\tfor i in range(1, number_convolutional_layers):\n\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]), strides=(strides[i]),\n\t\t\t\t kernel_regularizer=L2_norm, activation='relu',\n\t\t\t\t data_format='channels_first'))\n\n\t\t\t\t#if i != number_convolutional_layers - 1:\n\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))\n\n\t\t# convBranch.add(tf.keras.layers.Flatten(name=\"flatten\"))\n\n\t\t# Add dense layer + dropout\n\t\t# convBranch.add(tf.keras.layers.Dense(ndense_units * 2, name=\"dense\"))\n\t\tconvBranch.add(tf.keras.layers.Conv2D(ndense_units * 2, kernel_size=1, strides=1, name=\"fc1\",\n\t\t\t\t\t\t\t\t\t\t\t data_format='channels_first'))\n\t\tif dropout > 0:\n\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\t\tconvBranch.add(tf.keras.layers.Conv2D(ndense_units, kernel_size=1, strides=1, name=\"code0\",\n\t\t\t\t\t\t\t\t\t\t\t\t data_format='channels_first'))\n\t\t# \tconvBranch.add(tf.keras.layers.Dense(ndense_units, 
name=\"code\"))\n\t\tconvBranch.add(tf.keras.layers.Flatten(name=\"code\"))\n\n\t\treturn convBranch\n\n\tdef build_3Dbranch(self, name, input_shape=(50, 60, 60), number_convolutional_layers=4, filters_size=None,\n\t strides=None, filters_numbers=None, weight_decay=0.0005, ndense_units=4096, dropout=0.4):\n\t\tif filters_numbers is None:\n\t\t\tfilters_numbers = [96, 192, 512, 4096]\n\t\tL2_norm = tf.keras.regularizers.l2(weight_decay)\n\n\t\timport pdb; pdb.set_trace()\n\n\t\tconvBranch = tf.keras.Sequential(name=name)\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[0], kernel_size=(3, filters_size[0], filters_size[0]),\n\t\t strides=(1, strides[0], strides[0]), kernel_regularizer=L2_norm,\n\t\t activation='relu', input_shape=input_shape, data_format='channels_last'))\n\n\t\tconvBranch.add(tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_last'))\n\n\t\tfor i in range(1, number_convolutional_layers):\n\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(3, filters_size[i], filters_size[i]),\n\t\t\t strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,\n\t\t\t activation='relu', data_format='channels_last'))\n\n\t\t\tif i != number_convolutional_layers - 1:\n\t\t\t\tconvBranch.add( tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(3, filters_size[i], filters_size[i]),\n\t\t\t\t\t strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,\n\t\t\t\t\t activation='relu', data_format='channels_last'))\n\n\t\t\t\tconvBranch.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2, 2), data_format='channels_last'))\n\t\t\telse:\n\t\t\t\tconvBranch.add(tf.keras.layers.Conv2D(filters_numbers[i], kernel_size=(filters_size[i]),\n\t\t\t\t\t strides=(1, strides[i], strides[i]), kernel_regularizer=L2_norm,\n\t\t\t\t\t activation='relu', data_format='channels_last'))\n\n\t\tconvBranch.add(tf.keras.layers.Flatten(name=\"flatten\"))\n\n\t\t# Add dense layer + dropout\n\t\tconvBranch.add(tf.keras.layers.Dense(ndense_units, name=\"dense\"))\n\t\tif dropout > 0:\n\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\n\t\treturn convBranch\n\n\tdef build_3Dbranch_Manuel(self, name, input_shape=(25, 60, 60, 1), ndense_units=512, \n\t\t dropout=0.4, kernel_initializer='glorot_uniform', \n\t\t filters_size = [(3, 5, 5), (3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 2, 2), (2, 1, 1)], \n\t\t filters_numbers = [64, 128, 256, 512, 512, 512]):\n\t\tconvBranch = tf.keras.Sequential(name=name)\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[0], filters_size[0], strides=(1, 2, 2), padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[1], filters_size[1], strides=(1, 2, 2), padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[2], filters_size[2], strides=(2, 2, 2), padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[3], filters_size[3], strides=(2, 2, 2), padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[4], filters_size[4], strides=(1, 1, 1), 
padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tconvBranch.add(tf.keras.layers.Conv3D(filters_numbers[5], filters_size[5], strides=(1, 1, 1), padding='valid', activation='relu',\n\t\t input_shape=input_shape, data_format='channels_last', kernel_initializer=kernel_initializer))\n\n\t\tif dropout > 0:\n\t\t\tconvBranch.add(tf.keras.layers.Dropout(dropout, name=\"drop\"))\n\n\t\t# Dense without activation function\n\t\tconvBranch.add(tf.keras.layers.Conv3D(ndense_units, (1, 1, 1), strides=(1, 1, 1), activation=None,\n\t\t kernel_regularizer=tf.keras.regularizers.l2(1e-3),\n\t\t kernel_initializer='he_uniform', name=\"grayCode\"))\n\n\t\tconvBranch.add(tf.keras.layers.Flatten(name=\"code\"))\n\n\t\treturn convBranch\n\n\tdef fit(self, epochs, callbacks, training_generator, validation_generator, current_step=0, validation_steps=None, encode_layer=None, steps_per_epoch=None):\n\t\tself.hist = self.model.fit(training_generator, validation_data=validation_generator, epochs=epochs,\n\t\t callbacks=callbacks, validation_steps=validation_steps, initial_epoch=current_step,\n\t\t verbose=1, steps_per_epoch=steps_per_epoch) #, workers=4, max_queue_size=10, use_multiprocessing=True)\n\n\t\tif encode_layer is None:\n\t\t\tself.model_encode = tf.keras.Model(self.model.input, self.model.layers[-1].input)\n\t\telse:\n\t\t\tout_layer = self.model.get_layer(encode_layer).output\n\t\t\tself.model_encode = tf.keras.Model(self.model.input, out_layer)\n\t\treturn len(self.hist.epoch)\n\n\tdef predict(self, data, batch_size=128):\n\t\tpred = self.model.predict(data, batch_size=batch_size)\n\t\treturn pred\n\n\tdef encode(self, data, reshape=False):\n\t\t# features = self.model_encode(data)\n\t\tif reshape:\n\t\t\tn_data = np.zeros(shape=(data.shape[0], 25, 60, 60, 2))\n\t\t\tfor i in range(25):\n\t\t\t\tn_data[:, i, :, :, 0] = data[:, i*2, :, :]\n\t\t\t\tn_data[:, i, :, :, 1] = data[:, i*2+1, :, :]\n\t\t\tdata = n_data\n\t\tfeatures = self.model.predict(data)\n\t\tif isinstance(features, list):\n\t\t\tfeatures = features[0]\n\n\t\t# L2 normalize embeddings\n\t\tcodes_norm_tf = tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(features)\n\n\t\t# Get the numpy matrix\n\t\tcodes_norm = codes_norm_tf.numpy()\n\t\treturn codes_norm\n\n\tdef save(self, epoch=None):\n\t\tif epoch is not None:\n\t\t\tself.model.save(os.path.join(self.experdir, \"model-state-{:04d}.hdf5\".format(epoch)))\n\t\telse:\n\t\t\tself.model.save(os.path.join(self.experdir, \"model-final.hdf5\"))\n\n\tdef build_gaitset_branch(self, input_layer, input_shape=(25, 60, 60, 1)):\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=4))(input_layer)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(32, kernel_size=5, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(32, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t input_shape=input_shape, data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\t\tbranch_a = 
tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))(branch_a)\n\n\t\tbranch_b = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a)\n\t\tbranch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b)\n\t\tbranch_b = tf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t\t\t data_format='channels_last')(branch_b)\n\t\tbranch_b = tf.keras.layers.LeakyReLU()(branch_b)\n\t\tbranch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b)\n\t\tbranch_b = tf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t\t\t data_format='channels_last')(branch_b)\n\t\tbranch_b = tf.keras.layers.LeakyReLU()(branch_b)\n\t\tbranch_b = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(branch_b)\n\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t input_shape=input_shape, data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(64, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t input_shape=input_shape, data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))(branch_a)\n\n\t\tbranch_b_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a)\n\t\tbranch_b = tf.keras.layers.Add()([branch_b, branch_b_])\n\t\tbranch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b)\n\t\tbranch_b = tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t\t\t data_format='channels_last')(branch_b)\n\t\tbranch_b = tf.keras.layers.LeakyReLU()(branch_b)\n\t\tbranch_b = tf.keras.layers.ZeroPadding2D(padding=1)(branch_b)\n\t\tbranch_b = tf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t\t\t data_format='channels_last')(branch_b)\n\t\tbranch_b = tf.keras.layers.LeakyReLU()(branch_b)\n\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t input_shape=input_shape, data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.ZeroPadding2D(padding=1))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(\n\t\t\ttf.keras.layers.Conv2D(128, kernel_size=3, activation=None, padding='valid', use_bias=False,\n\t\t\t\t\t\t input_shape=input_shape, data_format='channels_last'))(branch_a)\n\t\tbranch_a = tf.keras.layers.TimeDistributed(tf.keras.layers.LeakyReLU())(branch_a)\n\t\tbranch_a = tf.keras.layers.Lambda(lambda x: tf.math.reduce_max(x, axis=1))(branch_a)\n\n\t\tbranch_b = tf.keras.layers.Add()([branch_b, 
branch_a])\n\n\t\t# HPP\n\t\tfeature = list()\n\t\tbin_num = [1, 2, 4, 8, 16]\n\t\t# bin_num = [1, 16]\n\t\tn, h, w, c = branch_b.shape\n\t\tprint(branch_b.shape)\n\t\tfor num_bin in bin_num:\n\t\t\tbranch_a_ = tf.keras.layers.Reshape((num_bin, -1, c))(branch_a)\n\t\t\tbranch_a_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=2) + tf.math.reduce_max(x, axis=2))(\n\t\t\t\tbranch_a_)\n\t\t\tfeature.append(branch_a_)\n\t\t\tbranch_b_ = tf.keras.layers.Reshape((num_bin, -1, c))(branch_b)\n\t\t\tbranch_b_ = tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=2) + tf.math.reduce_max(x, axis=2))(\n\t\t\t\tbranch_b_)\n\t\t\tfeature.append(branch_b_)\n\n\t\tmodel = tf.keras.layers.Concatenate(axis=1)(feature)\n\t\tmodel = tf.keras.layers.Lambda(lambda x: tf.transpose(x, [1, 0, 2]))(model)\n\t\tmodel = MatMul()(model)\n\t\tmodel = tf.keras.layers.Lambda(lambda x: tf.transpose(x, [1, 0, 2]))(model)\n\t\tmodel = tf.keras.layers.Flatten()(model)\n\n\t\treturn model\n\nclass MatMul(tf.keras.layers.Layer):\n def __init__(self, bin_num=31, hidden_dim=128, **kwargs):\n super(MatMul, self).__init__(**kwargs)\n\n self.bin_num = bin_num\n self.hidden_dim = hidden_dim\n\n # Create a trainable weight variable for this layer.\n w_init = tf.keras.initializers.GlorotUniform()\n self.kernel = tf.Variable(name=\"MatMul_kernel\" + str(np.random.randint(100, size=1)),\n initial_value=w_init(shape=(bin_num * 2, 128, hidden_dim), dtype=\"float32\"),\n trainable=True)\n\n def call(self, x):\n # Implicit broadcasting occurs here.\n # Shape x: (BATCH_SIZE, N, M)\n # Shape kernel: (N, M)\n # Shape output: (BATCH_SIZE, N, M)\n return tf.matmul(x, self.kernel)\n\n def get_config(self):\n config = super().get_config().copy()\n config.update({\n 'bin_num': self.bin_num,\n 'hidden_dim': self.hidden_dim,\n })\n return config\n"
] |
[
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.ConvLSTM2D",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.initializers.GlorotUniform",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Concatenate",
"numpy.random.randint",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.MaxPooling3D",
"tensorflow.keras.metrics.MeanSquaredError",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.Add",
"numpy.zeros",
"tensorflow.keras.layers.Flatten",
"tensorflow.matmul",
"tensorflow.math.reduce_max",
"tensorflow.keras.layers.Dense",
"tensorflow.math.l2_normalize",
"tensorflow.keras.backend.sum",
"tensorflow.keras.Model",
"tensorflow.keras.backend.abs",
"tensorflow.keras.losses.Huber",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.backend.switch",
"tensorflow.transpose",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.math.reduce_mean",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.metrics.MeanAbsoluteError",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
]
] |
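The gait network above builds horizontal pyramid pooling (HPP) by reshaping the feature map into 1/2/4/8/16 horizontal bins and summing mean- and max-pooling per bin (31 bins per branch, which is why the custom MatMul kernel has `bin_num * 2 = 62` rows for the two branches). A minimal, self-contained sketch of that pooling step, with illustrative shapes assumed rather than taken from the record:

```python
import tensorflow as tf

def hpp(features, bin_nums=(1, 2, 4, 8, 16)):
    """features: (batch, h, w, c); returns (batch, sum(bin_nums), c)."""
    _, h, w, c = features.shape
    pooled = []
    for n in bin_nums:
        # collect all pixels belonging to one horizontal bin along axis 2
        binned = tf.reshape(features, (-1, n, (h * w) // n, c))
        pooled.append(tf.reduce_mean(binned, axis=2) + tf.reduce_max(binned, axis=2))
    return tf.concat(pooled, axis=1)

x = tf.random.normal((2, 16, 8, 128))   # assumed feature-map shape
print(hpp(x).shape)                     # (2, 31, 128) for the default bins
```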
FerdinandZhong/ML-Model-CI
|
[
"90fa2de056dca05031f0787b96c520dc57dc664d"
] |
[
"modelci/app/experimental/endpoints/model_structure.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Li Yuanming\nEmail: [email protected]\nDate: 1/31/2021\n\nML model structure related API\n\"\"\"\nimport torch\nfrom fastapi import APIRouter\nfrom modelci.hub.manager import get_remote_model_weight\n\nfrom modelci.types.bo import Engine\n\nfrom modelci.persistence.service import ModelService\n\nfrom modelci.experimental.model.model_structure import Structure\n\nrouter = APIRouter()\n\n\[email protected]('/{id}', response_model=Structure)\nasync def get_model_structure(id: str): # noqa\n \"\"\"\n Get model structure as a model structure graph (connection between layer as edge, layers as nodes)\n\n Arguments:\n id (str): Model object ID.\n \"\"\"\n # return model DAG\n model = ModelService.get_model_by_id(id)\n if model.engine != Engine.PYTORCH:\n raise ValueError(f'model {id} is not supported for editing. '\n f'Currently only support model with engine=PYTORCH')\n\n # download model as local cache\n cache_path = get_remote_model_weight(model=model)\n net = torch.load(cache_path)\n return Structure.from_model(net)\n\n\[email protected]('/{id}') # TODO: add response_model\ndef update_model_structure_as_new(id: str, structure: Structure, dry_run: bool = False): # noqa\n \"\"\"\n TODO: Update model structure by adjusting layers (add, modify, delete) or rewiring the\n connections between layers.\n\n Examples:\n Fine-tune the model by modify the layer with name 'fc' (last layer). The layer\n has a changed argument out_features = 10. op_='M' indicates the operation to this layer ('fc')\n is 'Modify'. There is no changes in layer connections.\n Therefore, the structure change summary is\n [M] fc: (...) out_features=10\n\n >>> from collections import OrderedDict\n >>> structure_data = {\n ... 'layer': OrderedDict({'fc': {'out_features': 10, 'op_': 'M', 'type_': 'torch.nn.Linear'}})\n ... }\n >>> update_model_structure_as_new(id=..., structure=Structure.parse_obj(structure_data))\n\n Use original model as a feature extractor. The new model delete the last layer named 'fc', and add two\n layers as following:\n fc1: (nn.Linear) in_features=1024, out_features=512\n fc2: (nn.Linear) in_features=512, out_features=10\n The node change summary is\n [D] fc\n [A] fc1: (nn.Linear) in_features=1024, out_features=512\n [A] fc2: (nn.Linear) in_features=512, out_features=10\n Besides, we have connection changes:\n [D] conv1 -> fc\n [A] conv1 -> fc1\n [A] fc1 -> fc2\n\n >>>\n ... structure_data = {\n ... 'layer': {\n ... 'fc': {'op_': 'D'},\n ... 'fc1': {'in_features': 1024, 'out_features': 512, 'type_': 'torch.nn.Linear', 'op_': 'A'},\n ... 'fc2': {'in_features': 512, 'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'A'},\n ... },\n ... 'connection': {\n ... 'conv1': {'fc': 'D', 'fc1': 'A'},\n ... 'fc1': {'fc2': 'A'},\n ... }\n ... }\n\n Args:\n id (str): Model object ID of the original structure.\n structure: A model structure graph indicating changed layer (node) and layer connection (edge).\n dry_run (bool): Dry run update for validation.\n\n Returns:\n\n \"\"\"\n raise NotImplementedError('Method `update_model_structure_as_new` not implemented.')\n"
] |
[
[
"torch.load"
]
] |
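`get_model_structure` loads the cached PyTorch module with `torch.load` and converts it via `Structure.from_model`. A hedged sketch of the underlying idea — walking a `torch.nn.Module` and recording each named layer as a graph node. `describe_layers` is a hypothetical helper, not part of modelci, and the model is built inline here to stay runnable (the endpoint would obtain it from `torch.load(cache_path)`):

```python
import torch.nn as nn

def describe_layers(net: nn.Module) -> dict:
    """Hypothetical helper: map layer name -> layer type plus scalar attributes."""
    layers = {}
    for name, module in net.named_modules():
        if name:  # skip the root container itself
            scalars = {k: v for k, v in vars(module).items()
                       if isinstance(v, (int, float, bool)) and not k.startswith("_")}
            layers[name] = {"type_": type(module).__name__, **scalars}
    return layers

net = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, 10))
print(describe_layers(net))
# e.g. {'0': {'type_': 'Linear', 'training': True, 'in_features': 1024, 'out_features': 512}, ...}
```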
simon2112/OCR
|
[
"506ec018dc67832b41979784bfc847e1c7d73480"
] |
[
"Recursive/test.py"
] |
[
"from parse_args import parse_args\nimport editdistance\nimport numpy as np\nimport tensorflow as tf\nfrom cnn import build_cnn\nfrom network import build_network\nfrom tools import load_img\nimport tools\nflags = parse_args()\n\nclass Dataset:\n\n def __init__(self,x):\n self._index_in_epoch = 0\n self._epochs_completed = 0\n self._x = x\n \n self._num_examples = len(x)\n pass\n \n @property\n def x(self):\n return self._x\n \n def next_batch(self,batch_size):\n start = self._index_in_epoch\n if start == 0 and self._epochs_completed == 0:\n idx = np.arange(0, self._num_examples) # get all possible indexes\n\n self._x = self.x[idx] # get list of `num` random samples\n \n # go to the next batch\n if start + batch_size > self._num_examples:\n self._epochs_completed += 1\n rest_num_examples = self._num_examples - start\n x_rest_part = self._x[start:self._num_examples]\n \n idx0 = np.arange(0, self._num_examples) # get all possible indexes\n\n self._x = self.x[idx0] # get list of `num` random samples\n \n start = 0\n self._index_in_epoch = batch_size - rest_num_examples #avoid the case where the #sample != integar times of batch_size\n end = self._index_in_epoch \n x_new_part = self._x[start:end] \n\n return np.concatenate((x_rest_part, x_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._x[start:end]\n\ndef test():\n tf.reset_default_graph()\n infer_graph = tf.Graph()\n \n with infer_graph.as_default():\n encoder_outputs_t,inputs_t = build_cnn(False,flags.batch_size,flags.height, flags.width, flags.channels)\n _, _, pred_ids, logits_t, decoder_inputs_t, \\\n _, _ ,keep_prob_t= build_network(encoder_outputs_t,\n True, \n flags.batch_size,\n flags.decoder_length,\n flags.tgt_vocab_size,\n flags.attn_num_hidden,\n flags.encoder_length,\n flags.max_gradient_norm\n )\n infer_saver = tf.train.Saver()\n infer_sess = tf.Session(graph=infer_graph)\n model_file=tf.train.latest_checkpoint(flags.load_dir)\n infer_saver.restore(infer_sess, model_file)\n \n \n with open(flags.test_txt) as f:\n test = [line.rstrip() for line in f]\n test_len = len(test)\n test = np.array(test)\n data_test = Dataset(test)\n if flags.lex_txt != None:\n with open(flags.lex_txt) as f:\n lex = [line.rstrip().lower() for line in f]\n \n ti = int(test_len / flags.batch_size)\n rest = test_len % flags.batch_size\n \n gt = []\n predict = []\n \n for t in range(ti):\n batch_test = data_test.next_batch(flags.batch_size)\n path = []\n texts = [] \n for line in batch_test:\n path.append(line.split(' ',1)[0])\n texts.append(line.split(' ',1)[1])\n \n images = load_img(path,flags.height,flags.width)\n \n testing_decoder_inputs = np.zeros((flags.decoder_length,flags.batch_size), dtype=float)\n feed_dict_t = {inputs_t:images[:, :, :, np.newaxis],\n decoder_inputs_t:testing_decoder_inputs,keep_prob_t:1}\n q= infer_sess.run( pred_ids,feed_dict=feed_dict_t)\n \n for j in range(flags.batch_size):\n gt.append(texts[j])\n ans = np.array(q).T[j]\n pd = []\n for c in ans:\n if c != -1:\n character = tools.idx_to_word[c]\n if character != '<EOS>':\n pd.append(character)\n predict.append(''.join(pd))\n \n batch_test = data_test.next_batch(flags.batch_size)\n path = []\n texts = [] \n for line in batch_test:\n path.append(line.split(' ',1)[0])\n texts.append(line.split(' ',1)[1])\n images = load_img(path,flags.height,flags.width)\n \n \n feed_dict_t = {inputs_t:images[:, :, :, np.newaxis],\n decoder_inputs_t:testing_decoder_inputs,keep_prob_t:1}\n q = infer_sess.run( 
pred_ids,feed_dict=feed_dict_t)\n \n \n for k in range(rest):\n gt.append(texts[k])\n ans = np.array(q).T[k]\n pd = []\n for c in ans:\n if c != -1:\n character = tools.idx_to_word[c]\n if character != '<EOS>':\n pd.append(character)\n predict.append(''.join(pd))\n \n correct = float(0) \n cnt = 0\n acc_s = 0\n \n for l in range(len(gt)):\n cnt =cnt + 1\n if gt[l] == predict[l]:\n correct = correct + 1 \n \n acc_s = correct / cnt\n if flags.lex_txt != None: \n correct_l = float(0) \n cnt = 0\n for l in range(len(gt)):\n cnt =cnt + 1\n lexicon = lex[l].split(',')\n dt = editdistance.eval(predict[l], lexicon[0])\n pl = lexicon[0]\n for ll in lexicon[1:]:\n dt_temp = editdistance.eval(predict[l], ll)\n \n if dt_temp < dt:\n dt = dt_temp\n pl = ll\n if pl == gt[l]:\n correct_l = correct_l + 1\n \n acc_l = correct_l / cnt \n \n print('accuracy: ', acc_s)\n if flags.lex_txt != None:\n print('accuracy with lexicon: ', acc_l)\n\nif __name__ == '__main__':\n test()\n\n\n\n\n \n\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"numpy.arange",
"numpy.concatenate",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array",
"numpy.zeros"
]
] |
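The accuracy-with-lexicon loop in `test()` snaps each raw prediction to the closest lexicon entry by edit distance before comparing to ground truth. The same step as a tiny standalone function, using the `editdistance` package the script already depends on:

```python
import editdistance

def snap_to_lexicon(prediction, lexicon):
    """Return the lexicon word with the smallest edit distance to `prediction`."""
    return min(lexicon, key=lambda word: editdistance.eval(prediction, word))

print(snap_to_lexicon("hel1o", ["hello", "help", "halo"]))  # -> "hello"
```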
krishpop/leibnizgym
|
[
"21deb066af612b764c25a28d37bfa8f9cf6c124d"
] |
[
"scripts/rlg_train.py"
] |
[
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n# isaacgym-rlgpu\nfrom isaacgym import rlgpu\nfrom rlgpu.utils.config import set_np_formatting, set_seed\n# leibniz-gym: dump all environments for loading\nfrom leibnizgym.envs.trifinger import TrifingerEnv as Trifinger\n# leibnizgym\nfrom leibnizgym.wrappers.vec_task import VecTaskPython\nfrom leibnizgym.utils.config_utils import load_cfg, get_args\nfrom leibnizgym.utils.errors import InvalidTaskNameError\nfrom leibnizgym.utils.message import *\n# rl-games\nfrom rl_games.common import env_configurations, vecenv\nfrom rl_games.torch_runner import Runner\nfrom rl_games.common import wrappers\nfrom rl_games.common.algo_observer import AlgoObserver\nfrom rl_games.algos_torch import torch_ext\nimport torch\nimport numpy as np\n# python\nimport os\nimport argparse\nimport yaml\nfrom datetime import datetime\n\n\ndef parse_vec_task(args: argparse.Namespace, cfg: dict) -> VecTaskPython:\n \"\"\"Parses the configuration parameters for the environment task.\n\n TODO (@mayank): Remove requirement for args and make this a normal function\n inside utils.\n Args:\n args: command line arguments.\n cfg: environment configuration dictionary (task)\n\n Returns:\n TThe vectorized RL-env wrapped around the task.\n \"\"\"\n # create native task and pass custom config\n if args.task_type == \"Python\":\n # check device on which to run agent and environment\n if args.device == \"CPU\":\n print_info(\"Running using python CPU...\")\n # check if agent is on different device\n sim_device = 'cpu'\n ppo_device = 'cuda:0' if args.ppo_device == \"GPU\" else 'cpu'\n else:\n print_info(\"Running using python GPU...\")\n sim_device = 'cuda:0'\n ppo_device = 'cuda:0'\n # create the IsaacEnvBase defined using leibnizgym\n try:\n task = eval(args.task)(config=cfg, device=sim_device,\n visualize=not args.headless,\n verbose=args.verbose)\n except NameError:\n raise InvalidTaskNameError(args.task)\n # wrap environment around vec-python wrapper\n env = VecTaskPython(task, rl_device=ppo_device, clip_obs=5, clip_actions=1)\n else:\n raise ValueError(f\"No task of type `{args.task_type}` in leibnizgym.\")\n\n return env\n\n\ndef create_rlgpu_env(**kwargs):\n \"\"\"\n Creates the task from configurations and wraps it using RL-games wrappers if required.\n \"\"\"\n # TODO (@arthur): leibnizgym parse task\n env = parse_vec_task(cli_args, task_cfg)\n # print the environment information\n print_info(env)\n # save environment config into file\n env.dump_config(os.path.join(logdir, 'env_config.yaml'))\n # wrap around the environment\n frames = kwargs.pop('frames', 1)\n if frames > 1:\n env = wrappers.FrameStack(env, frames, False)\n return env\n\n\nclass RlGamesGpuEnvAdapter(vecenv.IVecEnv):\n \"\"\"\n Adapter from VecPythonTask to Rl-Games VecEnv.\n \"\"\"\n\n def __init__(self, config_name: str, num_actors: int, **kwargs):\n # this basically calls the `create_rlgpu_env()` function for RLGPU environment.\n self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)\n # check if environment is for asymmetric PPO or not\n self.use_global_obs = (self.env.num_states > 0)\n # get initial 
observations\n self.full_state = {\n \"obs\": self.env.reset()\n }\n # get state if assymmetric environment\n if self.use_global_obs:\n self.full_state[\"states\"] = self.env.get_state()\n\n \"\"\"\n Properties\n \"\"\"\n\n def get_number_of_agents(self):\n return self.env.get_number_of_agents()\n\n def get_env_info(self):\n info = {\n 'num_envs': self.env.num_envs,\n 'action_space': self.env.action_space,\n 'observation_space': self.env.observation_space\n }\n # print the spaces (for debugging)\n print(\">> Action space: \", info['action_space'])\n print(\">> Observation space: \", info['observation_space'])\n # check if environment is for asymmetric PPO or not\n if self.use_global_obs:\n info['state_space'] = self.env.state_space\n print(\">> State space: \", info['state_space'])\n # return the information about spaces\n return info\n\n \"\"\"\n Operations\n \"\"\"\n\n def reset(self):\n # reset the environment\n self.full_state[\"obs\"] = self.env.reset()\n # check if environment is for asymmetric PPO or not\n if self.use_global_obs:\n self.full_state[\"states\"] = self.env.get_state()\n return self.full_state\n else:\n return self.full_state[\"obs\"]\n\n def step(self, action):\n # step through the environment\n next_obs, reward, is_done, info = self.env.step(action)\n # check if environment is for asymmetric PPO or not\n # TODO (@arthur): Improve the return only dictinary\n self.full_state[\"obs\"] = next_obs\n if self.use_global_obs:\n self.full_state[\"states\"] = self.env.get_state()\n return self.full_state, reward, is_done, [[], info]\n else:\n return self.full_state[\"obs\"], reward, is_done, [[], info]\n\n\n# register the rl-games adapter to use inside the runner\nvecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RlGamesGpuEnvAdapter(config_name, num_actors, **kwargs))\nenv_configurations.register('rlgpu', {\n 'vecenv_type': 'RLGPU',\n 'env_creator': lambda **kwargs: create_rlgpu_env(**kwargs),\n})\n\n\nclass LeibnizAlgoObserver(AlgoObserver):\n \"\"\"Allows us to log stats from the env along with the algorithm running stats. 
\"\"\"\n\n def __init__(self):\n pass\n\n def after_init(self, algo):\n self.algo = algo\n self.game_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)\n self.direct_info = {}\n self.writer = self.algo.writer\n\n def process_infos(self, infos, done_indices):\n if not infos:\n return\n if len(infos) > 0 and isinstance(infos[0], dict):\n for ind in done_indices:\n if len(infos) <= ind // self.algo.num_agents:\n continue\n info = infos[ind // self.algo.num_agents]\n game_res = None\n if 'battle_won' in info:\n game_res = info['battle_won']\n if 'scores' in info:\n game_res = info['scores']\n\n if game_res is not None:\n self.game_scores.update(torch.from_numpy(np.asarray([game_res])).to(self.algo.ppo_device))\n if len(infos) > 1 and isinstance(infos[1], dict): # allow direct logging from env\n self.direct_info = infos[1]\n\n def after_clear_stats(self):\n self.game_scores.clear()\n\n def after_print_stats(self, frame, epoch_num, total_time):\n if self.game_scores.current_size > 0:\n mean_scores = self.game_scores.get_mean()\n self.writer.add_scalar('scores/mean', mean_scores, frame)\n self.writer.add_scalar('scores/iter', mean_scores, epoch_num)\n self.writer.add_scalar('scores/time', mean_scores, total_time)\n for k, v in self.direct_info.items():\n self.writer.add_scalar(k, v, frame)\n\ndef run_rlg_hydra(hydra_cfg):\n global task_cfg, agent_cfg_train, cli_args, logdir, vargs\n from omegaconf import OmegaConf\n task_cfg = OmegaConf.to_container(hydra_cfg.gym)\n agent_cfg_train = OmegaConf.to_container(hydra_cfg.rlg)\n cli_args= hydra_cfg.args\n logdir = cli_args['logdir']\n vargs = OmegaConf.to_container(cli_args)\n run_rlg()\n\n\ndef run_rlg():\n global logdir\n # Create default directories for weights and statistics\n os.makedirs(\"nn\", exist_ok=True)\n os.makedirs(\"runs\", exist_ok=True)\n # set numpy formatting for printing only\n set_np_formatting()\n\n # append the timestamp to logdir\n now = datetime.now()\n now_dir_name = now.strftime(\"%m-%d-%Y-%H-%M-%S\")\n logdir = os.path.join(logdir, now_dir_name)\n os.makedirs(logdir, exist_ok=True)\n # print the common info\n print_notify(f'Saving logs at: {logdir}')\n print_notify(f'Verbosity : {cli_args.verbose}')\n print_notify(f'Seed : {agent_cfg_train[\"seed\"]}')\n # set logdir and seed\n cli_args.logdir = logdir\n set_seed(agent_cfg_train[\"seed\"])\n # print training configuration for debugging\n if cli_args.verbose:\n print_info(f'Agent training configuration: ')\n print_dict(agent_cfg_train)\n print(40 * '-')\n # save agent config into file\n with open(os.path.join(logdir, 'agent_config.yaml'), 'w') as file:\n yaml.dump(agent_cfg_train, file)\n # convert CLI arguments into dictionory\n # create runner and set the settings\n runner = Runner(LeibnizAlgoObserver())\n runner.load(agent_cfg_train)\n runner.reset()\n runner.run(vargs)\n\nif __name__ == '__main__':\n # get CLI arguments\n cli_args = get_args(use_rlg_config=True)\n # parse arguments to load configurations\n task_cfg, agent_cfg_train, logdir = load_cfg(cli_args)\n vargs = vars(cli_args)\n run_rlg()\n\n# EOF\n"
] |
[
[
"numpy.asarray"
]
] |
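`RlGamesGpuEnvAdapter` wraps the environment so that, for asymmetric PPO (`num_states > 0`), actors receive `obs` while the critic additionally gets privileged `states`. A toy illustration of that dict pattern — `DummyEnv` is an assumption standing in for the IsaacGym environment so the snippet runs without rl-games or isaacgym installed:

```python
import numpy as np

class DummyEnv:
    num_states = 4  # > 0 marks the asymmetric setup
    def reset(self):
        return np.zeros(8)                 # actor observation
    def get_state(self):
        return np.zeros(self.num_states)   # privileged critic state

env = DummyEnv()
use_global_obs = env.num_states > 0
full_state = {"obs": env.reset()}
if use_global_obs:
    full_state["states"] = env.get_state()
print(full_state)
```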
rjuppa/vmmr
|
[
"a968b869e00fe46ef862fe794b063318a66c894f"
] |
[
"keras/util/augment.py"
] |
[
"#!/usr/bin/env python3\n\n# create a dataset with same size of classes\n# use augmentation if necessary\n\nimport os, math\nfrom os.path import join\n\nimport cv2\nimport numpy as np\nimport random\nimport shutil\n\n\nTOTAL = 5000\nMIN_LIMIT = 50\nparent_path = os.path.abspath('.')\n# root = \"/Users/radekj/devroot/vmmr/datasets/\"\nroot = \"/storage/plzen1/home/radekj/vmmr/datasets\"\n\n\nclass ImageProcessor:\n image = None\n original = None\n width = 320\n height = 240\n mark = \"\"\n filters = [\"lighter\", \"saturation\", \"blur\", \"invert\", \"contrast\",\n \"affinet\", \"grayscale\", \"hist\", \"foggy\", \"rainy\", \"drops\"]\n\n def __init__(self, folder, filename, des_path):\n self.original = cv2.imread(os.path.join(folder, filename), cv2.IMREAD_COLOR)\n self.image = self.original\n self.folder = folder\n self.filename = filename[:-4] + \"_\"\n self.origname = filename[:-4] + \"_\"\n self.des_path = des_path\n\n path = os.path.join(self.des_path, filename)\n cv2.imwrite(path, self.original)\n\n def rnd(self, max):\n return random.randint(1, max)\n\n def get_rand_filter(self, used=None):\n if used is None:\n used = []\n\n ff = self.filters.copy()\n for f in used:\n ff.remove(f)\n\n idx = random.randint(1, len(ff))\n return ff[idx-1]\n\n def mix_it(self, times):\n # iterates filters, use them\n # and apply other filters to augment an image\n counter = 0\n for name in self.filters:\n f1 = name\n used = []\n self.image = self.original\n self.filename = self.origname\n for i in range(7):\n result = self.apply_filter(f1) # image saved\n counter += 1\n if counter >= times:\n return # stop\n\n self.image = result\n used.append(f1)\n f1 = self.get_rand_filter(used)\n\n def apply_filter(self, filter_name):\n if not hasattr(self, filter_name):\n raise AttributeError(\"filter_name\")\n\n method = getattr(self, filter_name)\n result = method()\n self.filename = self.filename + self.mark\n path = os.path.join(self.des_path, self.filename + \".jpg\")\n cv2.imwrite(path, result)\n return result\n\n def lighter(self):\n self.mark = \"L\"\n table = np.array([((i / 255.0) ** 0.8) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n return cv2.LUT(self.image, table)\n\n def saturation(self):\n self.mark = \"S\"\n saturation = 25\n image = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n v = image[:, :, 2]\n v = np.where(v <= 255 - saturation, v + saturation, 255)\n image[:, :, 2] = v\n return cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n\n def blur(self):\n self.mark = \"B\"\n return cv2.blur(self.image, (3, 3))\n\n def invert(self):\n self.mark = \"I\"\n return cv2.bitwise_not(self.image)\n\n def contrast(self):\n self.mark = \"C\"\n lab = cv2.cvtColor(self.image, cv2.COLOR_BGR2LAB)\n l, a, b = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))\n cl = clahe.apply(l)\n limg = cv2.merge((cl, a, b))\n return cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)\n\n def affinet(self):\n self.mark = \"A\"\n pts1 = np.float32([[50, 50], [200, 50], [50, 200]])\n pts2 = np.float32([[50, 45], [195, 45], [55, 205]])\n M = cv2.getAffineTransform(pts1, pts2)\n return cv2.warpAffine(self.image, M, (320, 240))\n\n def grayscale(self):\n self.mark = \"G\"\n gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n\n def hist(self):\n self.mark = \"H\"\n h, s, v = cv2.split(cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV))\n eq_v = cv2.equalizeHist(v)\n return cv2.cvtColor(cv2.merge([h, s, eq_v]), cv2.COLOR_HSV2RGB)\n\n def foggy(self):\n # foggy.jpg => 600x400\n 
self.mark = \"F\"\n x = random.randint(1, 600-self.width)\n y = random.randint(1, 400-self.height)\n alpha = 0.5\n fog = cv2.imread(os.path.join(root, \"foggy.jpg\"), cv2.IMREAD_COLOR)\n cropped = fog[y:y + self.height, x:x + self.width]\n beta = (1.0 - alpha)\n return cv2.addWeighted(self.image, alpha, cropped, beta, 0.0)\n\n def rainy(self):\n # rainy.jpg => 600x400\n self.mark = \"R\"\n x = random.randint(1, 600-self.width)\n y = random.randint(1, 400-self.height)\n alpha = 0.7\n fog = cv2.imread(os.path.join(root, \"rainy.jpg\"), cv2.IMREAD_COLOR)\n cropped = fog[y:y + self.height, x:x + self.width]\n beta = (1.0 - alpha)\n return cv2.addWeighted(self.image, alpha, cropped, beta, 0.0)\n\n def drops(self):\n # rainy.jpg => 600x400\n self.mark = \"D\"\n x = random.randint(1, 600-self.width)\n y = random.randint(1, 400-self.height)\n alpha = 0.9\n fog = cv2.imread(os.path.join(root, \"drops.jpg\"), cv2.IMREAD_COLOR)\n cropped = fog[y:y + self.height, x:x + self.width]\n beta = (1.0 - alpha)\n return cv2.addWeighted(self.image, alpha, cropped, beta, 0.0)\n\n\ndef just_copy_files(src_path, des_path):\n # take 5 from every model directory\n # and repeat until it is enough\n if not os.path.exists(des_path):\n os.mkdir(des_path)\n\n counter = 0\n while counter < TOTAL:\n for model in os.listdir(src_path):\n src_model_path = join(src_path, model)\n if os.path.isdir(src_model_path):\n\n ii = 0\n # iterate images in model dir\n for name in os.listdir(src_model_path):\n filename = join(src_model_path, name)\n dest_filename = join(des_path, name)\n if not os.path.exists(dest_filename):\n shutil.copy(filename, dest_filename)\n ii += 1\n counter += 1\n\n if counter > TOTAL:\n return # stop\n\n if ii > 5:\n break # go to next directory\n\n\ndef augment_folder(src_path, des_path, k):\n if not os.path.exists(des_path):\n os.mkdir(des_path)\n\n counter = 0\n for model in os.listdir(src_path):\n src_model_path = join(src_path, model)\n if os.path.isdir(src_model_path):\n\n for name in os.listdir(src_model_path):\n filename = join(src_model_path, name)\n if os.path.isfile(filename) and filename[-4:] == \".jpg\":\n # make k-times augmentation\n p = ImageProcessor(src_model_path, name, des_path)\n p.mix_it(k-1)\n\n counter += k\n if counter > TOTAL:\n return # stop\n\n\ndef get_make_count(path) -> int:\n count = 0\n for model in os.listdir(path):\n model_path = join(path, model)\n if os.path.isdir(model_path):\n files = os.listdir(model_path)\n images = list(filter(lambda x: x[-4:] == \".jpg\", files))\n count += len(images)\n\n return count\n\n\nif __name__ == \"__main__\":\n\n source = os.path.join(root, \"dest240x320\")\n dest = os.path.join(root, \"ds5000\")\n makes = sorted(os.listdir(source))\n # makes = [\"Ferrari\", \"Fiat\", \"Subaru\"]\n for make in makes:\n src_make_path = os.path.join(source, make)\n des_make_path = os.path.join(dest, make)\n\n if os.path.exists(src_make_path) and os.path.isdir(src_make_path):\n n = get_make_count(src_make_path)\n if n < MIN_LIMIT:\n # skip small classes\n continue\n\n k = math.ceil(TOTAL / n)\n print(\"{} N={}, K={}\".format(make, n, k))\n\n if k == 1:\n just_copy_files(src_make_path, des_make_path)\n else:\n augment_folder(src_make_path, des_make_path, k)\n"
] |
[
[
"numpy.arange",
"numpy.where",
"numpy.float32"
]
] |
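`ImageProcessor.lighter` is a gamma-correction lookup table: each intensity `i` is mapped through `(i/255) ** 0.8 * 255` and applied with `cv2.LUT`. The same filter as a standalone function on a synthetic image, so the effect is easy to verify numerically:

```python
import cv2
import numpy as np

def lighten(image, gamma=0.8):
    """Brighten via a gamma LUT, as in ImageProcessor.lighter()."""
    table = np.array([((i / 255.0) ** gamma) * 255
                      for i in np.arange(256)]).astype("uint8")
    return cv2.LUT(image, table)

img = np.full((240, 320, 3), 100, dtype=np.uint8)  # synthetic mid-gray frame
print(lighten(img)[0, 0])                          # ~[120 120 120] for gamma=0.8
```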
aaguasca/gammapy
|
[
"b1a4e9dbaeec23b3eaca1c874752e92432920a42"
] |
[
"gammapy/modeling/models/spatial.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Spatial models.\"\"\"\nimport logging\nimport os\nimport numpy as np\nimport scipy.integrate\nimport scipy.special\nimport astropy.units as u\nfrom astropy.coordinates import Angle, SkyCoord\nfrom astropy.coordinates.angle_utilities import angular_separation, position_angle\nfrom astropy.utils import lazyproperty\nfrom regions import (\n CircleAnnulusSkyRegion,\n CircleSkyRegion,\n EllipseSkyRegion,\n PointSkyRegion,\n RectangleSkyRegion,\n)\nfrom gammapy.maps import Map, WcsGeom\nfrom gammapy.modeling import Parameter\nfrom gammapy.utils.gauss import Gauss2DPDF\nfrom gammapy.utils.scripts import make_path\nfrom .core import ModelBase\n\n\n__all__ = [\n \"ConstantFluxSpatialModel\",\n \"ConstantSpatialModel\",\n \"DiskSpatialModel\",\n \"GaussianSpatialModel\",\n \"GeneralizedGaussianSpatialModel\",\n \"PointSpatialModel\",\n \"Shell2SpatialModel\",\n \"ShellSpatialModel\",\n \"SpatialModel\",\n \"TemplateSpatialModel\",\n]\n\n\nlog = logging.getLogger(__name__)\n\nMAX_OVERSAMPLING = 200\n\n\n\ndef compute_sigma_eff(lon_0, lat_0, lon, lat, phi, major_axis, e):\n \"\"\"Effective radius, used for the evaluation of elongated models\"\"\"\n phi_0 = position_angle(lon_0, lat_0, lon, lat)\n d_phi = phi - phi_0\n minor_axis = Angle(major_axis * np.sqrt(1 - e ** 2))\n\n a2 = (major_axis * np.sin(d_phi)) ** 2\n b2 = (minor_axis * np.cos(d_phi)) ** 2\n denominator = np.sqrt(a2 + b2)\n sigma_eff = major_axis * minor_axis / denominator\n return minor_axis, sigma_eff\n\n\nclass SpatialModel(ModelBase):\n \"\"\"Spatial model base class.\"\"\"\n\n _type = \"spatial\"\n\n def __init__(self, **kwargs):\n frame = kwargs.pop(\"frame\", \"icrs\")\n super().__init__(**kwargs)\n if not hasattr(self, \"frame\"):\n self.frame = frame\n\n def __call__(self, lon, lat, energy=None):\n \"\"\"Call evaluate method\"\"\"\n kwargs = {par.name: par.quantity for par in self.parameters}\n\n if energy is None and self.is_energy_dependent:\n raise ValueError(\"Missing energy value for evaluation\")\n\n if energy is not None:\n kwargs[\"energy\"] = energy\n\n return self.evaluate(lon, lat, **kwargs)\n\n @property\n def evaluation_bin_size_min(self):\n return None\n\n # TODO: make this a hard-coded class attribute?\n @lazyproperty\n def is_energy_dependent(self):\n varnames = self.evaluate.__code__.co_varnames\n return \"energy\" in varnames\n\n @property\n def position(self):\n \"\"\"Spatial model center position (`SkyCoord`)\"\"\"\n lon = self.lon_0.quantity\n lat = self.lat_0.quantity\n return SkyCoord(lon, lat, frame=self.frame)\n\n @position.setter\n def position(self, skycoord):\n \"\"\"Spatial model center position\"\"\"\n coord = skycoord.transform_to(self.frame)\n self.lon_0.quantity = coord.data.lon\n self.lat_0.quantity = coord.data.lat\n\n @property\n def position_lonlat(self):\n \"\"\"Spatial model center position `(lon, lat)` in rad and frame of the model\"\"\"\n lon = self.lon_0.quantity.to_value(u.rad)\n lat = self.lat_0.quantity.to_value(u.rad)\n return lon, lat\n\n # TODO: get rid of this!\n _phi_0 = 0.0\n\n @property\n def phi_0(self):\n return self._phi_0\n\n @phi_0.setter\n def phi_0(self, phi_0=0.0):\n self._phi_0 = phi_0\n\n @property\n def position_error(self):\n \"\"\"Get 95% containment position error as (`~regions.EllipseSkyRegion`)\"\"\"\n if self.covariance is None:\n return EllipseSkyRegion(\n center=self.position,\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n )\n\n pars = self.parameters\n 
sub_covar = self.covariance.get_subcovariance([\"lon_0\", \"lat_0\"]).data.copy()\n cos_lat = np.cos(self.lat_0.quantity.to_value(\"rad\"))\n sub_covar[0, 0] *= cos_lat ** 2.0\n sub_covar[0, 1] *= cos_lat\n sub_covar[1, 0] *= cos_lat\n eig_vals, eig_vecs = np.linalg.eig(sub_covar)\n lon_err, lat_err = np.sqrt(eig_vals)\n y_vec = eig_vecs[:, 0]\n phi = (np.arctan2(y_vec[1], y_vec[0]) * u.rad).to(\"deg\") + self.phi_0\n err = np.sort([lon_err, lat_err])\n scale_r95 = Gauss2DPDF(sigma=1).containment_radius(0.95)\n err *= scale_r95\n if err[1] == lon_err * scale_r95:\n phi += 90 * u.deg\n height = 2 * err[1] * pars[\"lon_0\"].unit\n width = 2 * err[0] * pars[\"lat_0\"].unit\n else:\n height = 2 * err[1] * pars[\"lat_0\"].unit\n width = 2 * err[0] * pars[\"lon_0\"].unit\n\n return EllipseSkyRegion(\n center=self.position, height=height, width=width, angle=phi\n )\n\n def evaluate_geom(self, geom):\n \"\"\"Evaluate model on `~gammapy.maps.Geom`\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom`\n\n Returns\n -------\n `~gammapy.maps.Map`\n\n \"\"\"\n coords = geom.get_coord(frame=self.frame, sparse=True)\n\n if self.is_energy_dependent:\n return self(coords.lon, coords.lat, energy=coords[\"energy_true\"])\n else:\n return self(coords.lon, coords.lat)\n\n def integrate_geom(self, geom, oversampling_factor=None):\n \"\"\"Integrate model on `~gammapy.maps.Geom` or `~gammapy.maps.RegionGeom`.\n\n Integration is performed by simple rectangle approximation, the pixel center model value\n is multiplied by the pixel solid angle.\n An oversampling factor can be used for precision. By default, this parameter is set to None\n and an oversampling factor is automatically estimated based on the model estimation maximal\n bin width.\n\n For a RegionGeom, the model is integrated on a tangent WCS projection in the region.\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom` or `~gammapy.maps.RegionGeom`\n The geom on which the integration is performed\n oversampling_factor : int or None\n The oversampling factor to use for integration.\n Default is None: the factor is estimated from the model minimimal bin size\n\n Returns\n -------\n `~gammapy.maps.Map` or `gammapy.maps.RegionNDMap`, containing\n the integral value in each spatial bin.\n \"\"\"\n wcs_geom = geom\n mask = None\n\n if geom.is_region:\n wcs_geom = geom.to_wcs_geom().to_image()\n\n result = Map.from_geom(geom=wcs_geom)\n\n pix_scale = np.max(wcs_geom.pixel_scales.to_value(\"deg\"))\n if oversampling_factor is None:\n if self.evaluation_bin_size_min is not None:\n res_scale = self.evaluation_bin_size_min.to_value(\"deg\")\n if res_scale > 0:\n oversampling_factor = np.minimum(\n int(np.ceil(pix_scale / res_scale)), MAX_OVERSAMPLING\n )\n else:\n oversampling_factor = MAX_OVERSAMPLING\n else:\n oversampling_factor = 1\n\n if oversampling_factor > 1:\n if self.evaluation_radius is not None:\n # Is it still needed?\n width = 2 * np.maximum(\n self.evaluation_radius.to_value(\"deg\"), pix_scale\n )\n wcs_geom = wcs_geom.cutout(self.position, width)\n\n upsampled_geom = wcs_geom.upsample(oversampling_factor, axis_name=None)\n\n # assume the upsampled solid angles are approximately factor**2 smaller\n values = self.evaluate_geom(upsampled_geom) / oversampling_factor ** 2\n upsampled = Map.from_geom(upsampled_geom, unit=values.unit)\n upsampled += values\n\n if geom.is_region:\n mask = geom.contains(upsampled_geom.get_coord()).astype(\"int\")\n\n integrated = upsampled.downsample(\n oversampling_factor, preserve_counts=True, 
weights=mask\n )\n\n # Finally stack result\n result.unit = integrated.unit\n result.stack(integrated)\n else:\n values = self.evaluate_geom(wcs_geom)\n result.unit = values.unit\n result += values\n\n result *= result.geom.solid_angle()\n\n if geom.is_region:\n mask = result.geom.region_mask([geom.region])\n result = Map.from_geom(\n geom, data=np.sum(result.data[mask]), unit=result.unit\n )\n return result\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = super().to_dict(full_output)\n data[\"spatial\"][\"frame\"] = self.frame\n data[\"spatial\"][\"parameters\"] = data[\"spatial\"].pop(\"parameters\")\n return data\n\n def _get_plot_map(self, geom):\n if self.evaluation_radius is None and geom is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires geom to be defined for plotting.\"\n )\n\n if geom is None:\n width = 2 * max(self.evaluation_radius, 0.1 * u.deg)\n geom = WcsGeom.create(\n skydir=self.position, frame=self.frame, width=width, binsz=0.02\n )\n data = self.evaluate_geom(geom)\n return Map.from_geom(geom, data=data.value, unit=data.unit)\n\n def plot(self, ax=None, geom=None, **kwargs):\n \"\"\"Plot spatial model.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n m = self._get_plot_map(geom)\n if not m.geom.is_flat:\n raise TypeError(\n \"Use .plot_interactive() or .plot_grid() for Map dimension > 2\"\n )\n return m.plot(ax=ax, **kwargs)\n\n def plot_interative(self, ax=None, geom=None, **kwargs):\n \"\"\"Plot spatial model.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n\n m = self._get_plot_map(geom)\n if m.geom.is_image:\n raise TypeError(\"Use .plot() for 2D Maps\")\n m.plot_interactive(ax=ax, **kwargs)\n\n def plot_error(self, ax=None, **kwargs):\n \"\"\"Plot position error\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n import matplotlib.pyplot as plt\n\n # plot center position\n lon, lat = self.lon_0.value, self.lat_0.value\n\n ax = plt.gca() if ax is None else ax\n\n kwargs.setdefault(\"marker\", \"x\")\n kwargs.setdefault(\"color\", \"red\")\n kwargs.setdefault(\"label\", \"position\")\n\n ax.scatter(lon, lat, transform=ax.get_transform(self.frame), **kwargs)\n\n # plot position error\n if not np.all(self.covariance.data == 0):\n region = self.position_error.to_pixel(ax.wcs)\n artist = region.as_artist(facecolor=\"none\", edgecolor=kwargs[\"color\"])\n ax.add_artist(artist)\n\n return ax\n\n def plot_grid(self, geom=None, **kwargs):\n \"\"\"Plot spatial model energy slices in a grid.\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n\n if (geom is None) or geom.is_image:\n raise TypeError(\"Use .plot() for 2D 
Maps\")\n m = self._get_plot_map(geom)\n m.plot_grid(**kwargs)\n\n @classmethod\n def from_position(cls, position, **kwargs):\n \"\"\"Define the position of the model using a sky coord\n\n Parameters\n ----------\n position : `SkyCoord`\n Position\n\n Returns\n -------\n model : `SpatialModel`\n Spatial model\n \"\"\"\n lon_0, lat_0 = position.data.lon, position.data.lat\n return cls(lon_0=lon_0, lat_0=lat_0, frame=position.frame, **kwargs)\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius\"\"\"\n return None\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region\"\"\"\n\n if hasattr(self, \"to_region\"):\n return self.to_region()\n elif self.evaluation_radius is not None:\n return CircleSkyRegion(\n center=self.position,\n radius=self.evaluation_radius,\n )\n else:\n return None\n\n\nclass PointSpatialModel(SpatialModel):\n r\"\"\"Point Source.\n\n For more information see :ref:`point-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"PointSpatialModel\", \"point\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n is_energy_dependent = False\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`).\"\"\"\n return 0 * u.deg\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set as zero degrees.\n \"\"\"\n return 0 * u.deg\n\n @staticmethod\n def _grid_weights(x, y, x0, y0):\n \"\"\"Compute 4-pixel weights such that centroid is preserved.\"\"\"\n dx = np.abs(x - x0)\n dx = np.where(dx < 1, 1 - dx, 0)\n\n dy = np.abs(y - y0)\n dy = np.where(dy < 1, 1 - dy, 0)\n\n return dx * dy\n\n def is_energy_dependent(self):\n return False\n\n def evaluate_geom(self, geom):\n \"\"\"Evaluate model on `~gammapy.maps.Geom`.\"\"\"\n values = self.integrate_geom(geom).data\n return values / geom.solid_angle()\n\n def integrate_geom(self, geom, oversampling_factor=None):\n \"\"\"Integrate model on `~gammapy.maps.Geom`\n\n Parameters\n ----------\n geom : `Geom`\n Map geometry\n\n Returns\n -------\n flux : `Map`\n Predicted flux map\n \"\"\"\n geom_image = geom.to_image()\n if geom.is_hpx:\n idx, weights = geom_image.interp_weights({\"skycoord\": self.position})\n data = np.zeros(geom_image.data_shape)\n data[tuple(idx)] = weights\n else:\n x, y = geom_image.get_pix()\n x0, y0 = self.position.to_pixel(geom.wcs)\n data = self._grid_weights(x, y, x0, y0)\n return Map.from_geom(geom=geom_image, data=data, unit=\"\")\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.PointSkyRegion`).\"\"\"\n return PointSkyRegion(center=self.position, **kwargs)\n\n\nclass GaussianSpatialModel(SpatialModel):\n r\"\"\"Two-dimensional Gaussian model.\n\n For more information see :ref:`gaussian-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n sigma : `~astropy.coordinates.Angle`\n Length of the major semiaxis of the Gaussian, in angular units.\n e : `float`\n Eccentricity of the Gaussian (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"GaussianSpatialModel\", \"gauss\"]\n\n lon_0 = 
Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n sigma = Parameter(\"sigma\", \"1 deg\", min=0)\n e = Parameter(\"e\", 0, min=0, max=1, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`) chosen as sigma/3.\"\"\"\n return self.parameters[\"sigma\"].quantity / 3.0\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set as :math:`5\\sigma`.\n \"\"\"\n return 5 * self.parameters[\"sigma\"].quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, sigma, e, phi):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n\n if e == 0:\n a = 1.0 - np.cos(sigma)\n norm = (1 / (4 * np.pi * a * (1.0 - np.exp(-1.0 / a)))).value\n else:\n minor_axis, sigma_eff = compute_sigma_eff(\n lon_0, lat_0, lon, lat, phi, sigma, e\n )\n a = 1.0 - np.cos(sigma_eff)\n norm = (1 / (2 * np.pi * sigma * minor_axis)).to_value(\"sr-1\")\n\n exponent = -0.5 * ((1 - np.cos(sep)) / a)\n return u.Quantity(norm * np.exp(exponent).value, \"sr-1\", copy=False)\n\n def to_region(self, x_sigma=1.5, **kwargs):\n r\"\"\"Model outline at a given number of :math:`\\sigma`.\n\n Parameters\n ----------\n x_sigma : float\n Number of :math:`\\sigma\n Default is :math:`1.5\\sigma` which corresponds to about 68%\n containment for a 2D symmetric Gaussian.\n\n Returns\n -------\n region : `~regions.EllipseSkyRegion`\n Model outline.\n \"\"\"\n\n minor_axis = Angle(self.sigma.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * x_sigma * self.sigma.quantity,\n width=2 * x_sigma * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region consistent with evaluation radius\"\"\"\n return self.to_region(x_sigma=5)\n\n\nclass GeneralizedGaussianSpatialModel(SpatialModel):\n r\"\"\"Two-dimensional Generealized Gaussian model.\n\n For more information see :ref:`generalized-gaussian-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n Length of the major semiaxis, in angular units.\n eta : `float`\n Shape parameter whitin (0, 1]. 
Special cases for disk: ->0, Gaussian: 0.5, Laplace:1\n e : `float`\n Eccentricity (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"GeneralizedGaussianSpatialModel\", \"gauss-general\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\")\n eta = Parameter(\"eta\", 0.5, min=0.01, max=1.0)\n e = Parameter(\"e\", 0.0, min=0.0, max=1.0, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, eta, e, phi):\n sep = angular_separation(lon, lat, lon_0, lat_0)\n if isinstance(eta, u.Quantity):\n eta = eta.value # gamma function does not allow quantities\n minor_axis, r_eff = compute_sigma_eff(lon_0, lat_0, lon, lat, phi, r_0, e)\n z = sep / r_eff\n norm = 1 / (2 * np.pi * minor_axis * r_0 * eta * scipy.special.gamma(2 * eta))\n return (norm * np.exp(-(z ** (1 / eta)))).to(\"sr-1\")\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`).\n\n The bin min size is defined as r_0/(3+8*eta)/(e+1).\n \"\"\"\n return self.r_0.quantity / (3 + 8 * self.eta.value) / (self.e.value + 1)\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n The evaluation radius is defined as r_eval = r_0*(1+8*eta) so it verifies:\n r_eval -> r_0 if eta -> 0\n r_eval = 5*r_0 > 5*sigma_gauss = 5*r_0/sqrt(2) ~ 3.5*r_0 if eta=0.5\n r_eval = 9*r_0 > 5*sigma_laplace = 5*sqrt(2)*r_0 ~ 7*r_0 if eta = 1\n r_eval -> inf if eta -> inf\n \"\"\"\n return self.r_0.quantity * (1 + 8 * self.eta.value)\n\n def to_region(self, x_r_0=1, **kwargs):\n \"\"\"Model outline at a given number of r_0.\n\n Parameters\n ----------\n x_r_0 : float\n Number of r_0 (Default is 1).\n\n Returns\n -------\n region : `~regions.EllipseSkyRegion`\n Model outline.\n \"\"\"\n\n minor_axis = Angle(self.r_0.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * x_r_0 * self.r_0.quantity,\n width=2 * x_r_0 * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region consistent with evaluation radius\"\"\"\n scale = self.evaluation_radius / self.r_0.quantity\n return self.to_region(x_r_0=scale)\n\n\nclass DiskSpatialModel(SpatialModel):\n r\"\"\"Constant disk model.\n\n For more information see :ref:`disk-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n :math:`a`: length of the major semiaxis, in angular units.\n e : `float`\n Eccentricity of the ellipse (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n edge_width : float\n Width of the edge. 
The width is defined as the range within which\n the smooth edge of the model drops from 95% to 5% of its amplitude.\n It is given as fraction of r_0.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"DiskSpatialModel\", \"disk\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\", min=0)\n e = Parameter(\"e\", 0, min=0, max=1, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n edge_width = Parameter(\"edge_width\", value=0.01, min=0, max=1, frozen=True)\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`).\n\n The bin min size is defined as r_0*(1-edge_width)/10.\n \"\"\"\n return self.r_0.quantity * (1 - self.edge_width.quantity) / 10.0\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to the length of the semi-major axis plus the edge width.\n \"\"\"\n return 1.1 * self.r_0.quantity * (1 + self.edge_width.quantity)\n\n @staticmethod\n def _evaluate_norm_factor(r_0, e):\n \"\"\"Compute the normalization factor.\"\"\"\n semi_minor = r_0 * np.sqrt(1 - e ** 2)\n\n def integral_fcn(x, a, b):\n A = 1 / np.sin(a) ** 2\n B = 1 / np.sin(b) ** 2\n C = A - B\n cs2 = np.cos(x) ** 2\n\n return 1 - np.sqrt(1 - 1 / (B + C * cs2))\n\n return (\n 2\n * scipy.integrate.quad(\n lambda x: integral_fcn(x, r_0, semi_minor), 0, np.pi\n )[0]\n ) ** -1\n\n @staticmethod\n def _evaluate_smooth_edge(x, width):\n value = (x / width).to_value(\"\")\n edge_width_95 = 2.326174307353347\n return 0.5 * (1 - scipy.special.erf(value * edge_width_95))\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, e, phi, edge_width):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n\n if e == 0:\n sigma_eff = r_0\n else:\n sigma_eff = compute_sigma_eff(lon_0, lat_0, lon, lat, phi, r_0, e)[1]\n\n norm = DiskSpatialModel._evaluate_norm_factor(r_0, e)\n\n in_ellipse = DiskSpatialModel._evaluate_smooth_edge(\n sep - sigma_eff, sigma_eff * edge_width\n )\n return u.Quantity(norm * in_ellipse, \"sr-1\", copy=False)\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n minor_axis = Angle(self.r_0.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * self.r_0.quantity,\n width=2 * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n\nclass ShellSpatialModel(SpatialModel):\n r\"\"\"Shell model.\n\n For more information see :ref:`shell-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n radius : `~astropy.coordinates.Angle`\n Inner radius, :math:`r_{in}`\n width : `~astropy.coordinates.Angle`\n Shell width\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n\n See Also\n --------\n Shell2SpatialModel\n \"\"\"\n\n tag = [\"ShellSpatialModel\", \"shell\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n radius = Parameter(\"radius\", \"1 deg\")\n width = Parameter(\"width\", \"0.2 deg\")\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`).\n\n The bin min size is defined as the shell width.\n \"\"\"\n return self.width.quantity\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to 
:math:`r_\\text{out}`.\n \"\"\"\n return self.radius.quantity + self.width.quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, radius, width):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n radius_out = radius + width\n\n norm = 3 / (2 * np.pi * (radius_out ** 3 - radius ** 3))\n\n with np.errstate(invalid=\"ignore\"):\n # np.where and np.select do not work with quantities, so we use the\n # workaround with indexing\n value = np.sqrt(radius_out ** 2 - sep ** 2)\n mask = sep < radius\n value[mask] = (value - np.sqrt(radius ** 2 - sep ** 2))[mask]\n value[sep > radius_out] = 0\n\n return norm * value\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.CircleAnnulusSkyRegion`).\"\"\"\n return CircleAnnulusSkyRegion(\n center=self.position,\n inner_radius=self.radius.quantity,\n outer_radius=self.radius.quantity + self.width.quantity,\n **kwargs,\n )\n\n\nclass Shell2SpatialModel(SpatialModel):\n r\"\"\"Shell model with outer radius and relative width parametrization\n\n For more information see :ref:`shell2-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n Outer radius, :math:`r_{out}`\n eta : float\n Shell width relative to outer radius, r_0, should be within (0,1]\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n\n See Also\n --------\n ShellSpatialModel\n \"\"\"\n\n tag = [\"Shell2SpatialModel\", \"shell2\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\")\n eta = Parameter(\"eta\", 0.2, min=0.02, max=1)\n\n @property\n def evaluation_bin_size_min(self):\n \"\"\"Minimal evaluation bin size (`~astropy.coordinates.Angle`).\n\n The bin min size is defined as r_0*eta.\n \"\"\"\n return self.eta.value * self.r_0\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to :math:`r_\\text{out}`.\n \"\"\"\n return self.r_0.quantity\n\n @property\n def r_in(self):\n return (1 - self.eta.quantity) * self.r_0.quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, eta):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n r_in = (1 - eta) * r_0\n\n norm = 3 / (2 * np.pi * (r_0 ** 3 - r_in ** 3))\n\n with np.errstate(invalid=\"ignore\"):\n # np.where and np.select do not work with quantities, so we use the\n # workaround with indexing\n value = np.sqrt(r_0 ** 2 - sep ** 2)\n mask = sep < r_in\n value[mask] = (value - np.sqrt(r_in ** 2 - sep ** 2))[mask]\n value[sep > r_0] = 0\n\n return norm * value\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.CircleAnnulusSkyRegion`).\"\"\"\n return CircleAnnulusSkyRegion(\n center=self.position,\n inner_radius=self.r_in,\n outer_radius=self.r_0.quantity,\n **kwargs,\n )\n\n\nclass ConstantSpatialModel(SpatialModel):\n \"\"\"Spatially constant (isotropic) spatial model.\n\n For more information see :ref:`constant-spatial-model`.\n\n Parameters\n ----------\n value : `~astropy.units.Quantity`\n Value\n \"\"\"\n\n tag = [\"ConstantSpatialModel\", \"const\"]\n value = Parameter(\"value\", \"1 sr-1\", frozen=True)\n\n frame = \"icrs\"\n evaluation_radius = None\n position = None\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n data = super().to_dict(full_output)\n 
data[\"spatial\"].pop(\"frame\")\n data[\"spatial\"][\"parameters\"] = []\n return data\n\n @staticmethod\n def evaluate(lon, lat, value):\n \"\"\"Evaluate model.\"\"\"\n return value\n\n @staticmethod\n def to_region(**kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n return EllipseSkyRegion(\n center=SkyCoord(np.nan * u.deg, np.nan * u.deg),\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n **kwargs,\n )\n\n\nclass ConstantFluxSpatialModel(SpatialModel):\n \"\"\"Spatially constant flux spatial model.\n\n For more information see :ref:`constant-spatial-model`.\n\n \"\"\"\n\n tag = [\"ConstantFluxSpatialModel\", \"const-flux\"]\n\n frame = \"icrs\"\n evaluation_radius = None\n position = None\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n data = super().to_dict(full_output)\n data[\"spatial\"].pop(\"frame\")\n return data\n\n @staticmethod\n def evaluate(lon, lat):\n \"\"\"Evaluate model.\"\"\"\n return 1 / u.sr\n\n @staticmethod\n def evaluate_geom(geom):\n \"\"\"Evaluate model.\"\"\"\n return 1 / geom.solid_angle()\n\n @staticmethod\n def integrate_geom(geom, oversampling_factor=None):\n \"\"\"Evaluate model.\"\"\"\n return Map.from_geom(geom=geom, data=1)\n\n @staticmethod\n def to_region(**kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n return EllipseSkyRegion(\n center=SkyCoord(np.nan * u.deg, np.nan * u.deg),\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n **kwargs,\n )\n\n\nclass TemplateSpatialModel(SpatialModel):\n \"\"\"Spatial sky map template model.\n\n For more information see :ref:`template-spatial-model`.\n\n Parameters\n ----------\n map : `~gammapy.maps.Map`\n Map template.\n meta : dict, optional\n Meta information, meta['filename'] will be used for serialization\n normalize : bool\n Normalize the input map so that it integrates to unity.\n interp_kwargs : dict\n Interpolation keyword arguments passed to `gammapy.maps.Map.interp_by_coord`.\n Default arguments are {'interp': 'linear', 'fill_value': 0}.\n \"\"\"\n\n tag = [\"TemplateSpatialModel\", \"template\"]\n\n def __init__(\n self,\n map,\n meta=None,\n normalize=True,\n interp_kwargs=None,\n filename=None,\n ):\n if (map.data < 0).any():\n log.warning(\"Map has negative values. 
Check and fix this!\")\n\n if filename is not None:\n filename = str(make_path(filename))\n\n self.normalize = normalize\n\n if normalize:\n # Normalize the diffuse map model so that it integrates to unity\n if map.geom.is_image:\n data_sum = map.data.sum()\n else:\n # Normalize in each energy bin\n data_sum = map.data.sum(axis=(1, 2)).reshape((-1, 1, 1))\n\n data = map.data / data_sum\n data /= map.geom.solid_angle().to_value(\"sr\")\n map = map.copy(data=data, unit=\"sr-1\")\n\n if map.unit.is_equivalent(\"\"):\n map = map.copy(unit=\"sr-1\")\n log.warning(\"Missing spatial template unit, assuming sr^-1\")\n\n self._map = map.copy()\n\n self.meta = {} if meta is None else meta\n interp_kwargs = {} if interp_kwargs is None else interp_kwargs\n interp_kwargs.setdefault(\"method\", \"linear\")\n interp_kwargs.setdefault(\"fill_value\", 0)\n self._interp_kwargs = interp_kwargs\n self.filename = filename\n super().__init__()\n\n @property\n def map(self):\n \"\"\"Template map (`~gammapy.maps.Map`)\"\"\"\n return self._map\n\n @property\n def is_energy_dependent(self):\n return \"energy_true\" in self.map.geom.axes.names\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to half of the maximal dimension of the map.\n \"\"\"\n return np.max(self.map.geom.width) / 2.0\n\n @classmethod\n def read(cls, filename, normalize=True, **kwargs):\n \"\"\"Read spatial template model from FITS image.\n If unit is not given in the FITS header the default is ``sr-1``.\n\n Parameters\n ----------\n filename : str\n FITS image filename.\n normalize : bool\n Normalize the input map so that it integrates to unity.\n kwargs : dict\n Keyword arguments passed to `Map.read()`.\n \"\"\"\n m = Map.read(filename, **kwargs)\n return cls(m, normalize=normalize, filename=filename)\n\n def evaluate(self, lon, lat, energy=None):\n coord = {\n \"lon\": lon.to_value(\"deg\"),\n \"lat\": lat.to_value(\"deg\"),\n }\n if energy is not None:\n coord[\"energy_true\"] = energy\n\n val = self.map.interp_by_coord(coord, **self._interp_kwargs)\n return u.Quantity(val, self.map.unit, copy=False)\n\n @property\n def position(self):\n \"\"\"`~astropy.coordinates.SkyCoord`\"\"\"\n return self.map.geom.center_skydir\n\n @property\n def position_lonlat(self):\n \"\"\"Spatial model center position `(lon, lat)` in rad and frame of the model\"\"\"\n lon = self.position.data.lon.rad\n lat = self.position.data.lat.rad\n return lon, lat\n\n @property\n def frame(self):\n return self.position.frame.name\n\n @classmethod\n def from_dict(cls, data):\n data = data[\"spatial\"]\n filename = data[\"filename\"]\n normalize = data.get(\"normalize\", True)\n m = Map.read(filename)\n return cls(m, normalize=normalize, filename=filename)\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = super().to_dict(full_output)\n data[\"spatial\"][\"filename\"] = self.filename\n data[\"spatial\"][\"normalize\"] = self.normalize\n data[\"spatial\"][\"unit\"] = str(self.map.unit)\n return data\n\n def write(self, overwrite=False):\n if self.filename is None:\n raise IOError(\"Missing filename\")\n elif os.path.isfile(self.filename) and not overwrite:\n log.warning(\"Template file already exits, and overwrite is False\")\n else:\n self.map.write(self.filename)\n\n def to_region(self, **kwargs):\n \"\"\"Model outline from template map boundary (`~regions.RectangleSkyRegion`).\"\"\"\n return RectangleSkyRegion(\n center=self.map.geom.center_skydir,\n 
width=self.map.geom.width[0][0],\n height=self.map.geom.width[1][0],\n **kwargs,\n )\n\n def plot(self, ax=None, geom=None, **kwargs):\n if geom is None:\n geom = self.map.geom\n super().plot(ax=ax, geom=geom, **kwargs)\n\n def plot_interative(self, ax=None, geom=None, **kwargs):\n if geom is None:\n geom = self.map.geom\n super().plot_interative(ax=ax, geom=geom, **kwargs)\n"
] |
[
[
"matplotlib.pyplot.gca",
"numpy.sqrt",
"numpy.abs",
"numpy.linalg.eig",
"numpy.cos",
"numpy.sort",
"numpy.sin",
"numpy.all",
"numpy.max",
"numpy.arctan2",
"numpy.ceil",
"numpy.errstate",
"numpy.exp",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] |
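The TemplateSpatialModel constructor in the row above normalizes its template in two steps: divide each energy plane by its sum, then by the pixel solid angle so the map carries sr^-1 units. A minimal numpy-only sketch of that step, assuming a constant per-pixel solid angle (gammapy itself derives it from the map geometry and keeps astropy units attached):

import numpy as np

# Normalization step from TemplateSpatialModel.__init__, restated with plain
# numpy: make each energy plane integrate to unity, then express it per sr.
data = np.random.rand(5, 64, 64)               # (energy, lat, lon) template
solid_angle = np.full((64, 64), 1e-6)          # assumed constant pixel solid angle [sr]

data_sum = data.sum(axis=(1, 2)).reshape((-1, 1, 1))   # per-energy-bin totals
norm = data / data_sum / solid_angle                   # template in sr^-1

print((norm * solid_angle).sum(axis=(1, 2)))   # ~[1. 1. 1. 1. 1.]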
MKegler/SpeechtACSmodel
|
[
"f7b747a1a286f2dd239f452184d790e096366de7"
] |
[
"PyNSL/PyNSL.py"
] |
[
"'''\nDirect Python port of a few functions from NSL toolbox implementing\nearly stage auditory processing. All credits go to the original authors.\nThe below implementation has been tested against the original \nMatlab code (http://nsl.isr.umd.edu/downloads.html) and yielded identical results.\nImplementation: Mikolaj Kegler ([email protected])\n'''\n\nimport numpy as np\nimport scipy.io as sio\nfrom scipy import signal\nimport os\nimport pkg_resources\n\ndef sigmoid(y, fac):\n '''\n Original documentation below.\n '''\n # SIGMOID nonlinear funcion for cochlear model\n # y = sigmoid(y, fac);\n # fac: non-linear factor\n # -- fac > 0, transister-like function\n # -- fac = 0, hard-limiter\n # -- fac = -1, half-wave rectifier\n # -- else, no operation, i.e., linear\n #\n # SIGMOID is a monotonic increasing function which simulates\n # hair cell nonlinearity.\n # See also: WAV2AUD, AUD2WAV\n #\n # % Auther: Powen Ru ([email protected]), NSL, UMD\n # % v1.00: 01-Jun-97\n\n if fac > 0:\n y = np.exp(-y/fac)\n y = 1./(1+y)\n elif fac == 0:\n y = (y > 0) #hard-limiter\n elif fac == -1:\n y = np.max(y, 0) # half-wave rectifier\n elif fac == -3:\n y = halfregu(y)\n\n return y\n\ndef halfregu(y):\n # Placeholder\n return y\n\n\ndef wav2aud(x, paras=[8,8,-2,-1], COCHBA=None):\n '''\n Original documentation below.\n '''\n # % WAV2AUD fast auditory spectrogramm (for band 180 - 7246 Hz)\n # x : the acoustic input.\n # %\tv5\t: the auditory spectrogram, N-by-(M-1)\n # %\n # %\tCOCHBA = (global) [cochead; cochfil]; (IIR filter)\n # % cochead : 1-by-M filter length (<= L) vector.\n # % f = real(cochead); filter order\n # % CF = imag(cochead); characteristic frequency\n # %\tcochfil : (Pmax+2)-by-M (L-by-M) [M]-channel filterbank matrix.\n # %\t\tB = real(cochfil); MA (Moving Average) coefficients.\n # %\t\tA = imag(cochfil); AR (AutoRegressive) coefficients.\n # %\tM\t: highest (frequency) channel\n # %\n # %\tCOCHBA = [cochfil]; (IIR filter)\n # %\tcochfil : (L-by-M) [M]-channel filterbank impulse responses.\n # %\n # %\tPARAS\t= [frmlen, tc, fac, shft];\n # %\tfrmlen\t: frame length, typically, 8, 16 or 2^[natural #] ms.\n # %\ttc\t: time const., typically, 4, 16, or 64 ms, etc.\n # %\t\t if tc == 0, the leaky integration turns to short-term avg.\n # %\tfac\t: nonlinear factor (critical level ratio), typically, .1 for\n # %\t\t a unit sequence, e.g., X -- N(0, 1);\n # %\t\t The less the value, the more the compression.\n # %\t\t fac = 0, y = (x > 0), full compression, booleaner.\n # %\t\t fac = -1, y = max(x, 0), half-wave rectifier\n # %\t\t fac = -2, y = x, linear function\n # %\tshft\t: shifted by # of octave, e.g., 0 for 16k, -1 for 8k,\n # %\t\t etc. 
SF = 16K * 2^[shft].%\n # %\n # %\tfilt\t: filter type, 'p'--> Powen's IIR filter (default)\n # %\t\t\t 'p_o' --> Powen's old IIR filter (steeper group delay)\n # %\n # %\tIIR filter : (24 channels/oct)\n # %\tfor the output of \tdownsamp/shift\ttc (64 ms)/ frame (16 ms)\n # %\t==================================================================\n # %\t180 - 7246\t\t1\t/0\t1024\t/ 256\n # %\t90 - 3623\t\t2\t/-1\t512\t/ 128\t*\n # %\n # %\tCharacteristic Frequency: CF = 440 * 2 .^ ((-31:97)/24);\n # %\tRoughly, CF(60) = 1 (.5) kHz for 16 (8) kHz.\n # %\n # %\tVERB\t: verbose mode\n # %\n # %\tWAV2AUD computes the auditory spectrogram for an acoustic waveform.\n # %\tThis function takes the advantage of IIR filter's fast performance\n # %\twhich not only reduces the computaion but also saves remarkable\n # %\tmemory space.\n # %\tSee also: AUD2WAV, UNITSEQ\n\n # % Auther: Powen Ru ([email protected]), NSL, UMD\n # % v1.00: 01-Jun-97\n\n # % Revision: Taishih Chi ([email protected]), NSL, UMD\n # % v1.10: 04-Sep-98, add Kuansan's filter (as FIR filter)\n\n # % Revision: Taishih Chi ([email protected]), NSL, UMD\n # % v2.00: 24-Jul-01, add hair cell membrane (lowpass) function\n\n # % Revision: Taishih Chi ([email protected]), NSL, UMD\n # % v2.10: 04-Apr-04, remove FIR filtering option (see wav2aud_fir.m)\n\n # % get filter bank,\n # %\tL: filter coefficient length;\n # %\tM: no. of channels\n\n # Load cochlear filterbank if not pre-loaded\n if COCHBA is None:\n data_path = pkg_resources.resource_filename('PyNSL', 'aud24.mat')\n f = sio.loadmat(data_path)\n COCHBA = f['COCHBA']\n del f\n\n (L, M) = COCHBA.shape # p_max = L - 2\n L_x = len(x) # length of input\n\n # octave shift, nonlinear factor, frame length, leaky integration\n shft = paras[3] # octave shift (default -1, so 16kHz input == 8 kHz)\n fac = paras[2] # nonlinear factor (-2 == linear)\n L_frm = np.round(paras[0] * 2**(4+shft)).astype(int) # frame length (points), paras[0] 8 -> miliseconds\n\n if paras[1]:\n alph = np.exp(-1/(paras[1]*2**(4+shft))) # decaying factor\n else:\n alph = 0 # short-term avg.\n\n # hair cell time constant in ms\n haircell_tc = 0.5\n beta = np.exp(-1/(haircell_tc*2**(4+shft)))\n\n # get data, allocate memory for ouput\n N = np.ceil(L_x / L_frm).astype(int) # No. 
of frames\n x_tmp = np.zeros(N * L_frm)\n x_tmp[0:len(x)] = x[:]\n x = x_tmp[:]\n del x_tmp\n v5 = np.zeros((N, M-1))\n # CF = 440 * 2 .^ ((-31:97)/24) # Center frequencies\n\n # last channel (highest frequency)\n p = COCHBA[0, M-1].real\n idx = np.arange(0,p+1, dtype=int) + 1\n B = COCHBA[idx, M-1].real\n A = COCHBA[idx, M-1].imag\n y1 = signal.lfilter(B, A, x)\n y2 = sigmoid(y1, fac)\n\n # hair cell membrane (low-pass <= 4 kHz)\n # ignored for LINEAR ionic channels (fac == -2)\n if (fac != -2):\n y2 = signal.lfilter([1.], [1 -beta], y2)\n\n y2_h = y2[:]\n y3_h = 0\n\n for ch in (np.arange(M-1, 0, -1) - 1):\n p = COCHBA[0, ch].real\n idx = np.arange(0,p+1, dtype=int) + 1\n B = COCHBA[idx, ch].real\n A = COCHBA[idx, ch].imag\n y1 = signal.lfilter(B, A, x)\n\n # TRANSDUCTION: hair cells\n # Fluid cillia coupling (preemphasis) (ignored)\n # ionic channels (sigmoid function)\n y2 = sigmoid(y1, fac)[:]\n # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)\n if (fac != -2):\n y2 = signal.lfilter([1.], [1 -beta], y2)\n\n # lateral inhibitory network\n # masked by higher (frequency) spatial response\n y3 = y2[:] - y2_h[:]\n y2_h = y2[:]\n\n # half-wave rectifier ---> y4\n y4 = np.maximum(y3, np.zeros(len(y3)))\n\n # temporal integration window ---> y5\n if alph: # leaky integration\n y5 = signal.lfilter([1.], [1, -alph], y4)\n v5[:, ch] = y5[(L_frm*np.arange(1,N+1)) - 1]\n else: # % short-term average\n if (L_frm == 1):\n v5[:, ch] = y4\n else:\n v5[:, ch] = np.mean(y4.reshape(L_frm,N,order='F').copy())\n\n return v5\n"
] |
[
[
"numpy.arange",
"scipy.io.loadmat",
"numpy.round",
"numpy.ceil",
"numpy.max",
"scipy.signal.lfilter",
"numpy.exp",
"numpy.zeros"
]
] |
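Two details of the wav2aud port above are worth flagging. In the fac == -1 branch, sigmoid calls np.max(y, 0), which is a reduction along axis 0; the elementwise half-wave rectifier the comment describes is np.maximum(y, 0). Likewise [1 -beta] is a single coefficient in Python (a pure gain), whereas the Matlab original's [1 -beta] denotes the leaky low-pass [1, -beta]; the default linear path (fac == -2) skips that filter, so the discrepancy goes unexercised. A self-contained demo of the nonlinearity with the elementwise rectifier, runnable without the aud24.mat filterbank:

import numpy as np

# Hair-cell nonlinearity from sigmoid() above, restated so the half-wave
# branch is elementwise (np.maximum rather than np.max).
def sigmoid(y, fac):
    if fac > 0:                               # transistor-like compression
        return 1.0 / (1.0 + np.exp(-y / fac))
    if fac == 0:                              # hard limiter
        return (y > 0).astype(float)
    if fac == -1:                             # half-wave rectifier
        return np.maximum(y, 0)
    return y                                  # fac == -2: linear (no-op)

y = np.linspace(-1.0, 1.0, 5)
for fac in (0.1, 0, -1, -2):
    print(fac, sigmoid(y, fac))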
minghao92/LocalPer
|
[
"c940dce63ff2583f836d4718ce43023fad310c05"
] |
[
"localper_clustering_random_graphs.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport networkx as nx\nimport numpy as np\nimport math\nimport snap\nimport random\nimport os\nimport argparse\n\n\nimport matplotlib.pyplot as plt\n\nimport scipy.spatial.distance as ssd\nfrom scipy.cluster.hierarchy import dendrogram, linkage\n\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\n\nfrom utils import dexPer_of_all_vertices_dir\nfrom utils import quantizer\nfrom utils import switch_to_snap_format, get_codebook_dexPer0, get_codebook_dexPer1\nfrom utils import nrgg_sphere, nrgg_torus\n\nfrom collections import defaultdict\n\nfrom gudhi.representations.vector_methods import Atol\n\ndef compute_LocalPer_vectors(networks_dir, k_ring, budget_0, budget_1): \n num_networks = len(networks_dir)\n\n dexPer0_Each = [[]] * len(networks_dir)\n dexPer1_Each = [[]] * len(networks_dir)\n\n # if k_ring >= 4, then take the top 30 types of points appearing in all the diagrams \n dexPer0_codebook_tuple = defaultdict(int)\n dexPer1_codebook_tuple = defaultdict(int)\n\n codebook_dexPer0 = []\n codebook_dexPer1 = []\n\n for i in range(len(networks_dir)):\n dexPer0_Each[i], dexPer1_Each[i] = dexPer_of_all_vertices_dir(networks_dir[i], k_ring) \n\n if k_ring >= 4:\n for items in dexPer0_Each[i]:\n for key, value in items.items():\n dexPer0_codebook_tuple[key] += value\n \n for items in dexPer1_Each[i]:\n for key, value in items.items():\n dexPer1_codebook_tuple[key] += value \n\n if k_ring >= 4:\n dexPer0_codebook_tuple = {k: v for k, v in sorted(dexPer0_codebook_tuple.items(), key=lambda item: item[1], reverse=True)[:30]}\n dexPer1_codebook_tuple = {k: v for k, v in sorted(dexPer1_codebook_tuple.items(), key=lambda item: item[1], reverse=True)[:30]}\n\n for key, value in dexPer0_codebook_tuple.items():\n codebook_dexPer0.append([key[0], key[1]])\n\n for key, value in dexPer1_codebook_tuple.items():\n codebook_dexPer1.append([key[0], key[1]])\n else:\n codebook_dexPer0 = get_codebook_dexPer0(k_ring)\n codebook_dexPer1 = get_codebook_dexPer1(k_ring)\n\n quantizer_dexPer0 = quantizer(codebook_dexPer0)\n quantizer_dexPer1 = quantizer(codebook_dexPer1)\n\n vec_dexPer0_Each = [[]] * num_networks\n vec_dexPer1_Each = [[]] * num_networks\n\n\n for i in range(num_networks):\n print(f\"quantizing network {i+1} / {num_networks}\", end=\"\\r\", flush=True)\n dexPer0_pairs = [[]] * len(dexPer0_Each[i])\n dexPer0_weights = [[]] * len(dexPer0_Each[i])\n\n for idx, item in enumerate(dexPer0_Each[i]):\n eh = []\n eh_weights = []\n for key, value in item.items():\n eh.append([key[0], key[1]])\n eh_weights.append(value)\n if len(eh) == 0:\n dexPer0_pairs[idx] = [[0.0, 0.0]]\n dexPer0_weights[idx] = [0]\n else:\n dexPer0_pairs[idx] = eh\n dexPer0_weights[idx] = eh_weights\n\n vec_dexPer0_Each[i] = quantizer_dexPer0.transform(dexPer0_pairs, dexPer0_weights)\n\n dexPer1_pairs = [[]] * len(dexPer1_Each[i])\n dexPer1_weights = [[]] * len(dexPer1_Each[i])\n\n for idx, item in enumerate(dexPer1_Each[i]):\n eh = []\n eh_weights = []\n for key, value in item.items():\n eh.append([key[0], key[1]])\n eh_weights.append(value)\n if len(eh) == 0:\n dexPer1_pairs[idx] = [[0.0, 0.0]]\n dexPer1_weights[idx] = [0]\n else:\n dexPer1_pairs[idx] = eh\n dexPer1_weights[idx] = eh_weights\n\n vec_dexPer1_Each[i] = quantizer_dexPer1.transform(dexPer1_pairs, dexPer1_weights)\n \n\n vec_dexPer0_Each = np.asarray(vec_dexPer0_Each, dtype=object)\n vec_dexPer1_Each = np.asarray(vec_dexPer1_Each, dtype=object)\n\n print(\"Quantization Done!\")\n\n vec_ATOL_dexPer0 = 
Atol(quantiser=MiniBatchKMeans(n_clusters=budget_0, random_state=42), weighting_method=\"iidproba\")\n vec_ATOL_dexPer0.fit(X=vec_dexPer0_Each)\n LocalPer_dexPer0 = vec_ATOL_dexPer0.transform(vec_dexPer0_Each)\n\n vec_ATOL_dexPer1 = Atol(quantiser=MiniBatchKMeans(n_clusters=budget_1, random_state=42), weighting_method=\"iidproba\")\n vec_ATOL_dexPer1.fit(X=vec_dexPer1_Each)\n LocalPer_dexPer1 = vec_ATOL_dexPer1.transform(vec_dexPer1_Each)\n\n return LocalPer_dexPer0, LocalPer_dexPer1\n\ndef localPer_clustering_random_graphs(dataset_scale, num_per_category, ring_number, budget_0, budget_1):\n num_networks_per_category = num_per_category\n leaf_size = 4\n\n if dataset_scale == \"small\":\n datasets = [\"A_ER\", \"B_Barabasi_Albert\", \"C_RGG\", \"D_WSG\", \"E_Sphere\", \"F_Torus\"]\n elif dataset_scale == \"large\":\n datasets = [\"A_LARGE_ER\", \"B_LARGE_Barabasi_Albert\", \"C_LARGE_RGG\", \"D_LARGE_WSG\", \"E_LARGE_Sphere\", \"F_LARGE_Torus\"]\n elif dataset_scale == \"sparse\":\n datasets = [\"A_SPARSE_ER\", \"B_SPARSE_Barabasi_Albert\", \"C_SPARSE_RGG\", \"D_SPARSE_WSG\", \"E_SPARSE_Sphere\", \"F_SPARSE_Torus\"]\n elif dataset_scale == \"small_large\":\n datasets = [\"A_ER\", \"A_LARGE_ER\", \"B_Barabasi_Albert\", \"B_LARGE_Barabasi_Albert\", \"C_RGG\", \"C_LARGE_RGG\", \"E_Sphere\", \"E_LARGE_Sphere\"]\n elif dataset_scale == \"small_sparse\":\n datasets = [\"A_ER\", \"A_SPARSE_ER\", \"B_Barabasi_Albert\", \"B_SPARSE_Barabasi_Albert\", \"C_RGG\", \"C_SPARSE_RGG\", \"E_Sphere\", \"E_SPARSE_Sphere\"]\n elif dataset_scale == \"large_sparse\":\n datasets = [\"A_LARGE_ER\", \"A_SPARSE_ER\", \"B_LARGE_Barabasi_Albert\", \"B_SPARSE_Barabasi_Albert\", \"C_LARGE_RGG\", \"C_SPARSE_RGG\", \"E_LARGE_Sphere\", \"E_SPARSE_Sphere\"]\n elif dataset_scale == \"small_large_sparse\":\n leaf_size = 2.5\n datasets = [\"A_ER\", \"A_LARGE_ER\", \"A_SPARSE_ER\", \"B_Barabasi_Albert\", \"B_LARGE_Barabasi_Albert\", \"B_SPARSE_Barabasi_Albert\", \"C_RGG\", \"C_LARGE_RGG\", \"C_SPARSE_RGG\", \"E_Sphere\", \"E_LARGE_Sphere\", \"E_SPARSE_Sphere\"]\n\n else:\n print(\"Please set the scale of random graphs from 'small', 'large' and 'combined'.\")\n return \n \n num_categories = len(datasets)\n \n k_ring = ring_number\n\n b0 = budget_0\n b1 = budget_1\n \n Dir = './syn_data/'\n \n if not os.path.isdir(Dir):\n os.makedirs(Dir)\n\n networks_dir = []\n networks_names = []\n networks = []\n\n print(\"Generating random networks...\")\n \n ##################################################################\n \n for name in datasets:\n if name == \"A_ER\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n p = 0.01\n\n network_name = name + \"_n_\" + str(n) + \"_p_\" + str(p) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.gnp_random_graph(n, p, seed=np.random) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"A_LARGE_ER\":\n for i in range(1, num_networks_per_category+1):\n n = 2000 # 1000\n p = 0.01 # 0.005\n \n network_name = name + \"_n_\" + str(n) + \"_p_\" + str(p) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.gnp_random_graph(n, p, seed=np.random)\n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n\n if name == \"A_SPARSE_ER\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n p = 0.004\n \n 
network_name = name + \"_n_\" + str(n) + \"_p_\" + str(p) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.gnp_random_graph(n, p, seed=np.random) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n\n if name == \"B_Barabasi_Albert\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n m = 5\n \n network_name = name + \"_n_\" + str(n) + \"_m_\" + str(m) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.barabasi_albert_graph(n, m, seed=np.random) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"B_LARGE_Barabasi_Albert\":\n for i in range(1, num_networks_per_category+1):\n n = 2000\n m = 10\n \n network_name = name + \"_n_\" + str(n) + \"_m_\" + str(m) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.barabasi_albert_graph(n, m, seed=np.random)\n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"B_SPARSE_Barabasi_Albert\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n m = 2\n \n network_name = name + \"_n_\" + str(n) + \"_m_\" + str(m) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.barabasi_albert_graph(n, m, seed=np.random) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n if name == \"C_RGG\":\n for i in range(1, num_networks_per_category+1):\n n = 1000 #2000 # 1000\n dim = 3\n r = 0.141\n \n network_name = name + \"_n_\" + str(n) + \"_dim_\" + str(dim) + \"_r_\" + str(r) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.random_geometric_graph(n, r, dim) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"C_LARGE_RGG\":\n for i in range(1, num_networks_per_category+1):\n n = 2000\n dim = 3\n r = 0.141\n \n network_name = name + \"_n_\" + str(n) + \"_dim_\" + str(dim) + \"_r_\" + str(r) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.random_geometric_graph(n, r, dim) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n \n if name == \"C_SPARSE_RGG\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n dim = 3\n r = 0.103\n \n network_name = name + \"_n_\" + str(n) + \"_dim_\" + str(dim) + \"_r_\" + str(r) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.random_geometric_graph(n, r, dim) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"D_WSG\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n k = 10 \n q = 0.1\n \n network_name = name + \"_n_\" + str(n) + \"_knn_\" + str(k) + \"_q_\" + str(q) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = 
nx.watts_strogatz_graph(n, k, q) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"D_LARGE_WSG\":\n for i in range(1, num_networks_per_category+1):\n n = 2000\n k = 20\n q = 0.1\n \n network_name = name + \"_n_\" + str(n) + \"_knn_\" + str(k) + \"_q_\" + str(q) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir) \n\n if not os.path.exists(network_dir):\n g = nx.watts_strogatz_graph(n, k, q) \n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n\n if name == \"D_SPARSE_WSG\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n k = 4 \n q = 0.1\n \n network_name = name + \"_n_\" + str(n) + \"_knn_\" + str(k) + \"_q_\" + str(q) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n g = nx.watts_strogatz_graph(n, k, q)\n nx.write_edgelist(g, network_dir, delimiter='\\t', data=False)\n \n\n if name == \"E_Sphere\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n d = 2\n r = 1.0\n nbhd = 0.19\n p = 0.001 \n network_name = name + \"_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_sphere(n, d, r, nbhd, p, network_dir)\n\n if name == \"E_LARGE_Sphere\":\n for i in range(1, num_networks_per_category+1):\n n = 2000\n d = 2\n r = 1.0\n nbhd = 0.19\n p = 0.001 \n network_name = name + \"_LARGE_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_sphere(n, d, r, nbhd, p, network_dir)\n \n\n if name == \"E_SPARSE_Sphere\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n d = 2\n r = 1.0\n nbhd = 0.11\n p = 0.001 \n network_name = name + \"_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_sphere(n, d, r, nbhd, p, network_dir)\n\n\n if name == \"F_Torus\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n a = 1.8\n c = 5\n nbhd = 1.0\n p = 0.001 \n network_name = name + \"_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_torus(n, a, c, nbhd, p, network_dir)\n\n if name == \"F_LARGE_Torus\":\n for i in range(1, num_networks_per_category+1):\n n = 2000 \n a = 1.8\n c = 5\n nbhd = 1.0\n p = 0.001 \n network_name = name + \"_LARGE_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_torus(n, a, c, nbhd, p, network_dir)\n\n if name == \"F_SPARSE_Torus\":\n for i in range(1, num_networks_per_category+1):\n n = 1000\n a = 1.8\n c = 5\n nbhd = 0.58\n p = 0.001 \n network_name = name + \"_n_\" + str(n) + \"-\" + str(i) \n networks_names.append(network_name)\n network_dir = Dir + network_name\n networks_dir.append(network_dir)\n\n if not os.path.exists(network_dir):\n nrgg_torus(n, a, c, nbhd, p, network_dir)\n \n print(networks_names) \n\n LocalPer_dexPer0_all_networks, LocalPer_dexPer1_all_networks = compute_LocalPer_vectors(networks_dir, k_ring, b0, b1)\n 
LocalPer_vec_networks = np.hstack((LocalPer_dexPer0_all_networks, LocalPer_dexPer1_all_networks))\n\n print(\"Finish LocalPer featurization!\")\n \n total_networks = num_categories * num_networks_per_category\n \n distance_matrix_vec = np.zeros([total_networks, total_networks])\n \n for i in range(total_networks):\n for j in range(i):\n distance_matrix_vec[i][j] = np.linalg.norm(LocalPer_vec_networks[i] - LocalPer_vec_networks[j]) # L2 norm\n distance_matrix_vec[j][i] = distance_matrix_vec[i][j]\n\n fig1 = plt.figure()\n axes = fig1.add_subplot(111)\n caxes = axes.matshow(distance_matrix_vec)\n plt.title(f\"LocalPer_{dataset_scale}_Random_graphs_{k_ring}_ring_L2\")\n fig1.colorbar(caxes)\n plt.savefig(f\"LocalPer_{dataset_scale}_Random_graphs_{k_ring}_ring_L2.pdf\")\n plt.show()\n\n\n fig2 = plt.figure() \n distArray = ssd.squareform(distance_matrix_vec)\n linked = linkage(distArray, 'average')\n den = dendrogram(linked, labels=networks_names, orientation='left', distance_sort='descending', show_leaf_counts=False, leaf_font_size=leaf_size)\n plt.savefig(f\"Den_LocalPer_{dataset_scale}_Random_graphs_{k_ring}_ring_L2.pdf\", bbox_inches='tight')\n plt.show()\n\n \n###############################################################################\n\ndef main(dataset_scale, num_per_category, ring_number, budget_0, budget_1):\n localPer_clustering_random_graphs(dataset_scale, num_per_category, ring_number, budget_0, budget_1)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description = 'Clustering synthetic random networks using LocalPer method, including ER, BA, RGG, WSG, SPHERE and TORUS.')\n parser.add_argument('-S', '--dataset_scale', type=str, default=\"small\", help=\"Scale of the synthetic datasets. Should be one of 'sparse', 'small', 'large', 'small_large_sparse', 'small_sparse', 'large_sparse' and 'small_large'.\")\n parser.add_argument('-N', '--num_per_category', type=int, default=\"10\", help=\"Number of networks in each category.\")\n parser.add_argument('-k', '--ring_number', type=int, default=\"1\", help=\"The ring number. Should be a small integer.\")\n parser.add_argument('-b0', '--budget_0', type=int, default=\"50\", help=\"The budget for the 0th dimensional features.\")\n parser.add_argument('-b1', '--budget_1', type=int, default=\"50\", help=\"The budget for the 1st dimensional features.\")\n args = parser.parse_args()\n main(args.dataset_scale, args.num_per_category, args.ring_number, args.budget_0, args.budget_1)\n\n\n"
] |
[
[
"numpy.hstack",
"matplotlib.pyplot.title",
"numpy.asarray",
"scipy.cluster.hierarchy.dendrogram",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"scipy.cluster.hierarchy.linkage",
"scipy.spatial.distance.squareform",
"sklearn.cluster.MiniBatchKMeans",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
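The clustering tail of the script above reduces to: stack the dimension-0 and dimension-1 LocalPer vectors, take pairwise L2 distances, convert the symmetric matrix to condensed form, and run average linkage. A sketch with synthetic vectors standing in for the ATOL output (no_plot=True keeps it runnable headless; the network names are hypothetical):

import numpy as np
import scipy.spatial.distance as ssd
from scipy.cluster.hierarchy import dendrogram, linkage

rng = np.random.default_rng(0)
vecs = rng.random((12, 100))                    # stand-in for np.hstack((LocalPer_dexPer0, LocalPer_dexPer1))
names = [f"net-{i}" for i in range(len(vecs))]  # hypothetical network labels

# Pairwise L2 distances, mirroring the double loop in the script.
dist = np.zeros((len(vecs), len(vecs)))
for i in range(len(vecs)):
    for j in range(i):
        dist[i, j] = dist[j, i] = np.linalg.norm(vecs[i] - vecs[j])

linked = linkage(ssd.squareform(dist), "average")
den = dendrogram(linked, labels=names, orientation="left", no_plot=True)
print(den["ivl"])                               # leaf order after clustering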
islamamirul/PermuteNet
|
[
"842591892d5b3af0e175a99b21c8e4e7cd7bd94b"
] |
[
"channel-wise-position-encoding/models/vgg.py"
] |
[
"'''VGG11/13/16/19 in Pytorch.'''\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.nn.functional as F\n\n\ncfg = {\n 'VGG5': [32, 'M', 64, 'M', 128, 'M', 256],\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef upsample_bilinear(x, size):\n if float(torch.__version__[:3]) <= 0.3:\n out = F.upsample(x, size, mode='bilinear')\n else:\n out = F.interpolate(x, size, mode='bilinear', align_corners=True)\n return out\n\n\nclass VGG(nn.Module):\n def __init__(self, vgg_name):\n super(VGG, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.classifier = nn.Linear(512, 10)\n\n def forward(self, x):\n out = self.features(x)\n # print(out.shape)\n # out = out.view(out.size(0), -1)\n # print(out.shape, x.shape)\n out = F.avg_pool2d(out, out.shape[3])\n\n out = out.view(out.size(0), -1)\n # print(out.shape)\n out_location = self.classifier(out)\n return out_location\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAP(nn.Module):\n def __init__(self, vgg_name, num_class=25):\n super(VGGGAP, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.pre_classifier = nn.Conv2d(512, num_class, kernel_size=3, padding=1)\n\n def forward(self, x):\n out = self.features(x)\n out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n # print(out.shape)\n\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAPReflection(nn.Module):\n def __init__(self, vgg_name, num_class=25):\n super(VGGGAPReflection, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.pre_classifier = nn.Conv2d(512, num_class, kernel_size=3, padding=1, padding_mode='reflect')\n\n def forward(self, x):\n out = self.features(x)\n out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n # print(out.shape)\n\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, padding_mode='reflect'),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAPReplicate(nn.Module):\n def __init__(self, vgg_name, num_class=25):\n super(VGGGAPReplicate, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.pre_classifier = nn.Conv2d(512, num_class, 
kernel_size=3, padding=1, padding_mode='replicate')\n\n def forward(self, x):\n out = self.features(x)\n out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n # print(out.shape)\n\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, padding_mode='replicate'),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAPShuffle(nn.Module):\n def __init__(self, vgg_name, num_class=25, shuffle=False):\n super(VGGGAPShuffle, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n # self.pre_classifier = nn.Conv2d(512, num_class, kernel_size=3, padding=1)\n self.classifier = nn.Linear(512, num_class)\n self.shuffle = shuffle\n\n def forward(self, x):\n out = self.features(x)\n # out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n\n if self.shuffle:\n rand_index = torch.randperm(512)\n out = out[:, rand_index]\n out = self.classifier(out)\n\n # print(out.shape)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, padding_mode='zeros'),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAPShuffleReflection(nn.Module):\n def __init__(self, vgg_name, num_class=25, shuffle=False):\n super(VGGGAPShuffleReflection, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n # self.pre_classifier = nn.Conv2d(512, num_class, kernel_size=3, padding=1)\n self.classifier = nn.Linear(512, num_class)\n self.shuffle = shuffle\n\n def forward(self, x):\n out = self.features(x)\n # out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n\n if self.shuffle:\n rand_index = torch.randperm(512)\n out = out[:, rand_index]\n out = self.classifier(out)\n\n # print(out.shape)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, padding_mode='reflect'),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGGAPShuffleReplicate(nn.Module):\n def __init__(self, vgg_name, num_class=25, shuffle=False):\n super(VGGGAPShuffleReplicate, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n # self.pre_classifier = nn.Conv2d(512, num_class, kernel_size=3, padding=1)\n self.classifier = nn.Linear(512, num_class)\n self.shuffle = shuffle\n\n def forward(self, x):\n out = self.features(x)\n # out = self.pre_classifier(out)\n out = F.avg_pool2d(out, out.shape[3])\n out = out.view(out.size(0), -1)\n\n if self.shuffle:\n rand_index = torch.randperm(512)\n out = out[:, rand_index]\n out = self.classifier(out)\n\n # print(out.shape)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, 
stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, padding_mode='replicate'),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\nclass VGGSemantic(nn.Module):\n def __init__(self, vgg_name):\n super(VGGSemantic, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n # self.classifier = nn.Linear(512, 10)\n # self.semantic_location = nn.Sequential(\n # OrderedDict([\n # ('conv5_4', nn.Conv2d(512, 256, 3, 1, 1, 1)),\n # ('conv5_4_bn', nn.BatchNorm2d(256)),\n # ('conv5_4_relu', nn.ReLU()),\n # ('drop5_4', nn.Dropout2d(p=0.1)),\n # ('conv6', nn.Conv2d(256, 11, 1, stride=1, padding=0)),\n # ])\n # )\n\n self.semantic_location = nn.Conv2d(512, 11, kernel_size=1, padding=0)\n\n def forward(self, x):\n out = self.features(x)\n # out = out.view(out.size(0), -1)\n out = self.semantic_location(out)\n # print(out.shape)\n out = upsample_bilinear(out, x.size()[2:])\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n # layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\n\n"
] |
[
[
"torch.nn.functional.upsample",
"torch.nn.Sequential",
"torch.randperm",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
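The VGGGAPShuffle* variants above all share one move: global-average-pool the 512 feature channels, optionally permute them with a fresh torch.randperm on every forward pass, then classify with a Linear head; only the convolutional padding_mode differs between the classes. A minimal sketch of that forward path on a dummy feature map:

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
feat = torch.randn(4, 512, 7, 7)           # stand-in for self.features(x)

# Global average pooling over the spatial grid, as in the forward() methods.
pooled = F.avg_pool2d(feat, feat.shape[3]).flatten(1)   # (4, 512)

classifier = nn.Linear(512, 25)
rand_index = torch.randperm(512)           # new channel permutation each call
out = classifier(pooled[:, rand_index])
print(out.shape)                           # torch.Size([4, 25])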
Rijksmuseum-Voice-Inference/voice-to-voice-translation
|
[
"bbdeb3fd4dc49311a735b59a580a33996ea6bbd3"
] |
[
"scripts/extract_features_for_merlin.py"
] |
[
"import os\nimport sys\nimport shutil\nimport glob\nimport time\nimport multiprocessing as mp\nimport numpy as np\nimport wave\n\nif len(sys.argv)!=5:\n print(\"Usage: \")\n print(\"python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>\")\n sys.exit(1)\n\n# top merlin directory\nmerlin_dir = sys.argv[1]\n\n# input audio directory\nwav_dir = sys.argv[2]\n\n# Output features directory\nout_dir = sys.argv[3]\n\n# initializations\nfs = int(sys.argv[4])\n\n# tools directory\nworld = os.path.join(merlin_dir, \"tools/bin/WORLD\")\nsptk = os.path.join(merlin_dir, \"tools/bin/SPTK-3.9\")\nreaper = os.path.join(merlin_dir, \"tools/bin/REAPER\")\n\nsp_dir = os.path.join(out_dir, 'sp' )\nmgc_dir = os.path.join(out_dir, 'mgc')\nap_dir = os.path.join(out_dir, 'ap' )\nbap_dir = os.path.join(out_dir, 'bap')\nf0_dir = os.path.join(out_dir, 'f0' )\nlf0_dir = os.path.join(out_dir, 'lf0')\n\nif not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\nif not os.path.exists(sp_dir):\n os.mkdir(sp_dir)\n\nif not os.path.exists(mgc_dir):\n os.mkdir(mgc_dir)\n\nif not os.path.exists(bap_dir):\n os.mkdir(bap_dir)\n\nif not os.path.exists(f0_dir):\n os.mkdir(f0_dir)\n\nif not os.path.exists(lf0_dir):\n os.mkdir(lf0_dir)\n\nif fs == 16000:\n nFFTHalf = 1024\n alpha = 0.58\n\nelif fs == 22050:\n nFFTHalf = 1024\n alpha = 0.65\n\nelif fs == 44100:\n nFFTHalf = 2048\n alpha = 0.76\n\nelif fs == 48000:\n nFFTHalf = 2048\n alpha = 0.77\n\nelse:\n print(\"As of now, we don't support %d Hz sampling rate.\" %(fs))\n print(\"Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz\")\n sys.exit(1)\n\n#bap order depends on sampling rate.\nmcsize=59\nb_use_reaper=True # If True: Reaper is used for f0 extraction. If False: The vocoder is used for f0 extraction.\n\ndef get_wav_filelist(wav_dir):\n wav_files = []\n for file in os.listdir(wav_dir):\n whole_filepath = os.path.join(wav_dir,file)\n if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(\".wav\"):\n # dont add stereo files\n if wave.open(whole_filepath, 'r').getnchannels() > 1: continue\n wav_files.append(whole_filepath)\n elif os.path.isdir(whole_filepath):\n wav_files += get_wav_filelist(whole_filepath)\n\n wav_files.sort()\n\n return wav_files\n\n\ndef read_binfile(filename, dim=60, dtype=np.float64):\n '''\n Reads binary file into numpy array.\n '''\n fid = open(filename, 'rb')\n v_data = np.fromfile(fid, dtype=dtype)\n fid.close()\n if np.mod(v_data.size, dim) != 0:\n raise ValueError('Dimension provided not compatible with file size.')\n m_data = v_data.reshape((-1, dim)).astype('float64') # This is to keep compatibility with numpy default dtype.\n m_data = np.squeeze(m_data)\n return m_data\n\ndef write_binfile(m_data, filename, dtype=np.float64):\n '''\n Writes numpy array into binary file.\n '''\n m_data = np.array(m_data, dtype)\n fid = open(filename, 'wb')\n m_data.tofile(fid)\n fid.close()\n return\n\ndef read_reaper_f0_file(est_file, skiprows=7):\n '''\n Reads f0 track into numpy array from EST file generated by REAPER.\n '''\n v_f0 = np.loadtxt(est_file, skiprows=skiprows, usecols=[2])\n v_f0[v_f0<0] = 0\n return v_f0\n\ndef reaper_f0_extract(in_wavfile, f0_file_ref, f0_file_out, frame_shift_ms=5.0):\n '''\n Extracts f0 track using REAPER.\n To keep consistency with the vocoder, it also fixes for the difference in number\n of frames between the REAPER f0 track and the acoustic parameters extracted by the vocoder.\n f0_file_ref: f0 extracted by the vocoder. 
It is used as a reference to fix the number of frames, as explained.\n '''\n\n # Run REAPER:\n print(\"Running REAPER f0 extraction...\")\n cmd = \"%s -a -s -x 400 -m 50 -u %1.4f -i %s -f %s\" % (os.path.join(reaper, 'reaper'), frame_shift_ms / 1000.0, in_wavfile, f0_file_out + \"_reaper\")\n os.system(cmd)\n\n # Protection - number of frames:\n v_f0_ref = read_binfile(f0_file_ref, dim=1)\n v_f0 = read_reaper_f0_file(f0_file_out + \"_reaper\")\n frm_diff = v_f0.size - v_f0_ref.size\n if frm_diff<0:\n v_f0 = np.r_[ v_f0, np.zeros(-frm_diff) + v_f0[-1]]\n if frm_diff>0:\n v_f0 = v_f0[:-frm_diff]\n\n # Save f0 file:\n write_binfile(v_f0, f0_file_out)\n return\n\n\ndef process(filename):\n '''\n The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity\n :param filename: path to wav file\n :return: .lf0, .mgc and .bap files\n '''\n\n file_id = os.path.basename(filename).split(\".\")[0]\n print('\\n' + file_id)\n\n ### WORLD ANALYSIS -- extract vocoder parameters ###\n ### extract sp, ap ###\n f0_file = os.path.join(f0_dir, file_id + '.f0')\n f0_world_file = f0_file\n if b_use_reaper:\n f0_world_file = f0_file + \"_world\"\n\n world_analysis_cmd = \"%s %s %s %s %s\" % (os.path.join(world, 'analysis'), \\\n filename,\n f0_world_file, \\\n os.path.join(sp_dir, file_id + '.sp'), \\\n os.path.join(bap_dir, file_id + '.bapd'))\n os.system(world_analysis_cmd)\n\n ### Extract f0 using reaper ###\n if b_use_reaper:\n reaper_f0_extract(filename, f0_world_file, f0_file)\n\n ### convert f0 to lf0 ###\n sptk_x2x_da_cmd = \"%s +da %s > %s\" % (os.path.join(sptk, 'x2x'), f0_file, \\\n os.path.join(f0_dir, file_id + '.f0a'))\n os.system(sptk_x2x_da_cmd)\n\n sptk_x2x_af_cmd = \"%s +af %s | %s > %s \" % (os.path.join(sptk, 'x2x'), \\\n os.path.join(f0_dir, file_id + '.f0a'), \\\n os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \\\n os.path.join(lf0_dir, file_id + '.lf0'))\n os.system(sptk_x2x_af_cmd)\n\n ### convert sp to mgc ###\n sptk_x2x_df_cmd1 = \"%s +df %s | %s | %s >%s\" % (os.path.join(sptk, 'x2x'), \\\n os.path.join(sp_dir, file_id + '.sp'), \\\n os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \\\n os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(\n mcsize) + ' -l ' + str(\n nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \\\n os.path.join(mgc_dir, file_id + '.mgc'))\n os.system(sptk_x2x_df_cmd1)\n\n ### convert bapd to bap ###\n sptk_x2x_df_cmd2 = \"%s +df %s > %s \" % (os.path.join(sptk, \"x2x\"), \\\n os.path.join(bap_dir, file_id + \".bapd\"), \\\n os.path.join(bap_dir, file_id + '.bap'))\n os.system(sptk_x2x_df_cmd2)\n\ndef try_process(filename):\n try:\n process(filename)\n except Exception as e:\n print(e)\n\nprint(\"--- Feature extraction started ---\")\nstart_time = time.time()\n\n# get wav files list\nwav_files = get_wav_filelist(wav_dir)\n\n# do multi-processing\npool = mp.Pool(mp.cpu_count())\npool.map(try_process, wav_files)\n\n# DEBUG:\n#for nxf in xrange(len(wav_files)):\n# process(wav_files[nxf])\n\n# clean temporal files\nshutil.rmtree(sp_dir, ignore_errors=True)\nshutil.rmtree(f0_dir, ignore_errors=True)\n\n\nfor zippath in glob.iglob(os.path.join(bap_dir, '*.bapd')):\n os.remove(zippath)\n\nprint(\"You should have your features ready in: \"+out_dir)\n\n(m, s) = divmod(int(time.time() - start_time), 60)\nprint((\"--- Feature extraction completion time: %d min. %d sec ---\" % (m, s)))\n\n"
] |
[
[
"numpy.fromfile",
"numpy.squeeze",
"numpy.mod",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] |
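reaper_f0_extract above patches the length mismatch between REAPER's f0 track and the vocoder reference by repeating the final f0 value or trimming the tail. The protection logic in isolation, with toy arrays in place of the binary files:

import numpy as np

# Frame-count fix from reaper_f0_extract: pad the REAPER track with its last
# value, or trim it, until it matches the vocoder reference length.
v_f0 = np.array([100.0, 101.0, 99.0])        # REAPER track (3 frames)
v_f0_ref = np.zeros(5)                       # vocoder reference (5 frames)

frm_diff = v_f0.size - v_f0_ref.size
if frm_diff < 0:
    v_f0 = np.r_[v_f0, np.zeros(-frm_diff) + v_f0[-1]]
elif frm_diff > 0:
    v_f0 = v_f0[:-frm_diff]
print(v_f0)                                  # [100. 101.  99.  99.  99.]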
ankitjain0101/Google-Store-Customer-Profiling
|
[
"627017c8dae8c3e7d643fdac79b0fba42ac5ffc2"
] |
[
"GA_Revenue.py"
] |
[
"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nimport collections\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom lightgbm.sklearn import LGBMRegressor\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_absolute_error,mean_squared_error\r\nimport lightgbm as lgbm\r\n#import dask.dataframe as dd\r\nimport gc\r\ngc.enable()\r\n\r\ntrain = pd.read_csv('E:/College/Analytics/Python/GA-Revenue/train_v2.csv')\r\ntest = pd.read_csv('E:/College/Analytics/Python/GA-Revenue/test_v2.csv')\r\n#test = dd.read_csv('E:/College/Analytics/Python/GA-Revenue/test_v2.csv')\r\n#train = dd.read_csv('E:/College/Analytics/Python/GA-Revenue/train_v2.csv')\r\n\r\ntrain.dtypes\r\ntrain.head(5)\r\ntrain.shape\r\ntrain.columns.values\r\n\r\ntest.dtypes\r\ntest.head(5)\r\ntest.shape\r\ntest.columns.values\r\n\r\ntrain.isnull().sum().sort_values(ascending=False)\r\n\r\ntrain['channelGrouping'].value_counts().plot(kind=\"bar\",title=\"Channel Grouping Distrubution\",figsize=(8,8),rot=25)\r\nsns.countplot(train['channelGrouping'])\r\n\r\ntrain['socialEngagementType'].value_counts()\r\ntrain['socialEngagementType'].describe()\r\n\r\ntrain.head(1)[[\"date\",\"visitStartTime\"]]\r\ntrain[\"date\"]=pd.to_datetime(train[\"date\"],format=\"%Y%m%d\")\r\ntrain[\"visitStartTime\"]=pd.to_datetime(train[\"visitStartTime\"],unit='s')\r\n\r\ntrain.head(1)[[\"date\",\"visitStartTime\"]]\r\n\r\nlist_of_devices = train['geoNetwork'].apply(json.loads).tolist()\r\nkeys = []\r\nfor devices_iter in list_of_devices:\r\n for list_element in list(devices_iter.keys()):\r\n if list_element not in keys:\r\n keys.append(list_element)\r\n \r\n\"keys existed in device attribute are:{}\".format(keys)\r\n\r\ndevice_df = pd.DataFrame(train.device.apply(json.loads).tolist())[[\"browser\",\"operatingSystem\",\"deviceCategory\",\"isMobile\"]]\r\ndevice_df.head(5)\r\ntraffic_source_df = pd.DataFrame(train.trafficSource.apply(json.loads).tolist())[[\"keyword\",\"medium\" , \"source\"]]\r\ntraffic_source_df.head(5)\r\ngeo_df = pd.DataFrame(train.geoNetwork.apply(json.loads).tolist())[[\"continent\",\"subContinent\",\"country\",\"city\"]]\r\ngeo_df.head(5)\r\ntotals_df = pd.DataFrame(train.totals.apply(json.loads).tolist())[[\"transactionRevenue\", \"newVisits\", \"bounces\", \"pageviews\", \"hits\"]]\r\ntotals_df.head(5)\r\n\r\n#Data Manipulation/Vizualization\r\nsns.countplot(device_df['isMobile'])\r\nsns.countplot(device_df['deviceCategory'])\r\ndevice_df['browser'].value_counts().head(10).plot(kind=\"bar\",title=\"Browser Distrubution\",figsize=(8,8),rot=25)\r\ndevice_df['operatingSystem'].value_counts().head(10).plot(kind=\"bar\",title=\"OS Distrubution\",figsize=(8,8),rot=25,color='teal')\r\n\r\nplt.subplots(figsize=(7, 6))\r\nsns.countplot(geo_df[geo_df['continent']== \"Asia\"]['subContinent'])\r\n\r\ngeo_df['continent'].value_counts().plot(kind=\"bar\",title=\"Continent Distrubution\",figsize=(8,8),rot=0)\r\ngeo_df[geo_df['continent']== \"Asia\"]['subContinent'].value_counts().plot(kind=\"bar\",title=\"Asia Distrubution\",figsize=(8,8),rot=0)\r\ngeo_df[geo_df['continent']== \"Europe\"]['subContinent'].value_counts().plot(kind=\"bar\",title=\"Europe Distrubution\",figsize=(8,8),rot=0)\r\n\r\ntraffic_source_df[\"medium\"].value_counts().plot(kind=\"bar\",title=\"Medium\",rot=0)\r\ntraffic_source_df[\"source\"].value_counts().head(10).plot(kind=\"bar\",title=\"source\",rot=75,color=\"teal\")\r\n\r\nfig,axes = 
plt.subplots(1,2,figsize=(15,10))\r\ntraffic_source_df[\"keyword\"].value_counts().head(10).plot(kind=\"bar\",ax=axes[0], title=\"keywords (total)\",color=\"yellow\")\r\ntraffic_source_df[traffic_source_df[\"keyword\"] != \"(not provided)\"][\"keyword\"].value_counts().head(15).plot(kind=\"bar\",ax=axes[1],title=\"keywords (dropping NA)\",color=\"c\")\r\n\r\ntrain[\"revenue\"] = pd.DataFrame(train.totals.apply(json.loads).tolist())[[\"transactionRevenue\"]]\r\ntrain[\"revenue\"].value_counts().sort_values(ascending=False)\r\n#data[\"revenue\"]=data[\"revenue\"].astype(np.int64)\r\n\r\nrevdat_df = train[[\"revenue\", \"date\",\"visitNumber\"]].dropna()\r\nrevdat_df[\"revenue\"] = revdat_df.revenue.astype(np.int64)\r\nrevdat_df.head()\r\nplt.subplots(figsize=(20, 10))\r\nplt.plot(revdat_df.groupby(\"date\")[\"revenue\"].sum())\r\n\r\nab=revdat_df.groupby(\"date\").sum()\r\n\r\nvisitdate_df = train[[\"date\",\"visitNumber\"]]\r\nvisitdate_df[\"visitNumber\"] = visitdate_df.visitNumber.astype(np.int64)\r\nvisitdate_df.groupby(\"date\").sum()\r\n\r\nfig, ax1 = plt.subplots(figsize=(20,10))\r\nt = ab.index\r\ns1 = ab[\"visitNumber\"]\r\nax1.plot(t, s1, 'b-')\r\nax1.set_xlabel('day')\r\n# Make the y-axis label, ticks and tick labels match the line color.\r\nax1.set_ylabel('visitNumber', color='b')\r\nax1.tick_params('y', colors='b')\r\n\r\nax2 = ax1.twinx()\r\ns2 = ab[\"revenue\"]\r\nax2.plot(t, s2, 'r--')\r\nax2.set_ylabel('revenue', color='r')\r\nax2.tick_params('y', colors='r')\r\nfig.tight_layout()\r\n\r\nfig,ax = plt.subplots(figsize=(9,5))\r\nax.set_title(\"Histogram of log(visitNumbers) \\n per session\")\r\nax.set_ylabel(\"Repetition\")\r\nax.set_xlabel(\"Log(visitNumber)\")\r\nax.grid(color='b', linestyle='-', linewidth=0.1)\r\nax.hist(np.log(train['visitNumber']))\r\n\r\ntmp_least_visitNumbers_list = collections.Counter(list(train.visitNumber)).most_common()[:-10-1:-1]\r\ntmp_most_visitNumbers_list = collections.Counter(list(train.visitNumber)).most_common(10)\r\nleast_visitNumbers = []\r\nmost_visitNumbers = []\r\nfor i in tmp_least_visitNumbers_list:\r\n least_visitNumbers.append(i[0])\r\nfor i in tmp_most_visitNumbers_list:\r\n most_visitNumbers.append(i[0])\r\n\"10 most_common visitNumbers are {} times and 10 least_common visitNumbers are {} times\".format(most_visitNumbers,least_visitNumbers)\r\n\r\ntrain_all=pd.concat([train.drop([\"hits\"],axis=1),device_df,geo_df,traffic_source_df,totals_df],axis=1)\r\ntrain_all.dtypes\r\nfrom datetime import datetime\r\ntrain_all[\"month\"] = train_all['date'].dt.month\r\ntrain_all['visitHour'] = (train_all['visitStartTime'].apply(lambda x: str(datetime.fromtimestamp(x).hour))).astype(int)\r\nplt.figure(figsize=(10,5))\r\nsns.barplot(x='month', y=train_all['transactionRevenue'].astype(np.float), data=train_all)\r\nplt.figure(figsize=(10,5))\r\nsns.barplot(x='visitHour', y=train_all['transactionRevenue'].astype(np.float), data=train_all)\r\ntrain_all.visitNumber.value_counts().head(10).plot(kind=\"bar\",title=\"Vistor Numbers Distrubution\",figsize=(8,8),rot=25,color='teal')\r\n\r\ndf_train = train_all.drop(['date','month','device','geoNetwork','trafficSource','totals','customDimensions', 'socialEngagementType', 'visitStartTime', 'visitId', 'fullVisitorId' , 'revenue'], axis=1)\r\ndf_train.dtypes\r\ndf_train.shape\r\ndf_train.isnull().sum().sort_values(ascending=False)\r\ndf_train=df_train.fillna(0)\r\n\r\nnumerical_features = ['transactionRevenue','visitNumber', 'newVisits', 'bounces', 'pageviews', 'hits']\r\n\r\nfor col in 
numerical_features:\r\n df_train[col] = df_train[col].astype(np.float)\r\n\r\nvst_rev=df_train.groupby('visitNumber')['transactionRevenue'].agg(['count','mean','sum'])\r\nvst_rev.columns = [\"count\", \"mean transaction\",\"total revenue\"]\r\nvst_rev = vst_rev.sort_values(by=\"count\", ascending=False)\r\nsns.barplot(y=vst_rev['total revenue'].head(10),x=vst_rev.index[:10])\r\nsns.barplot(y=vst_rev['mean transaction'].head(10),x=vst_rev.index[:10])\r\n\r\n\r\ndef feat_plot(col):\r\n pt = df_train.loc[:,[col, 'transactionRevenue']]\r\n feat_vis=pt.groupby(col)['transactionRevenue'].agg(['count','mean'])\r\n feat_vis.columns = [\"count\", \"mean transaction value\"]\r\n feat_vis['total_revenue'] = feat_vis['count']*feat_vis['mean transaction value']\r\n feat_vis = feat_vis.sort_values(by=\"count\", ascending=False)\r\n plt.figure(figsize=(8, 16)) \r\n plt.subplot(2,1,1)\r\n sns.barplot(x=feat_vis['count'].head(10), y=feat_vis.index[:10])\r\n plt.subplot(2,1,2)\r\n sns.barplot(x=feat_vis['mean transaction value'].head(10), y=feat_vis.index[:10])\r\n\r\nfeat_plot('browser')\r\nfeat_plot('continent')\r\nfeat_plot('country')\r\nfeat_plot('operatingSystem')\r\nfeat_plot('source')\r\n\r\nfrom wordcloud import WordCloud\r\nsource = df_train['source']\r\nwordcloud2 = WordCloud(width=800, height=400).generate(' '.join(source))\r\nplt.figure( figsize=(12,10) )\r\nplt.imshow(wordcloud2)\r\nplt.axis(\"off\")\r\nplt.show()\r\n\r\n# Model\r\n\r\ncategorical_features = ['channelGrouping', 'browser', 'operatingSystem', 'deviceCategory', 'isMobile',\r\n 'continent', 'subContinent', 'country', 'city', 'keyword', 'medium', 'source'] \r\n\r\nfor col in categorical_features:\r\n lbl = LabelEncoder()\r\n lbl.fit(list(df_train[col].values.astype('str')))\r\n df_train[col] = lbl.transform(list(df_train[col].values.astype('str')))\r\n\r\n\r\ncat_feature=(df_train.dtypes == object) | (df_train.dtypes == bool)\r\ncat_cols = df_train.columns[cat_feature].tolist()\r\nle=LabelEncoder()\r\nle.fit(list(df_train[cat_cols].values.astype('str')))\r\ndf_train[cat_cols] = df_train[cat_cols].apply(lambda col:le.fit_transform(col).values.astype('str'))\r\n\r\nX=df_train.drop(['transactionRevenue'], axis=1)\r\ny=np.log1p(df_train['transactionRevenue'])\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 10)\r\n\r\nparams = {\r\n \"objective\" : \"regression\",\r\n \"metric\" : \"rmse\",\r\n \"num_leaves\" : 30,\r\n \"learning_rate\" : 0.1,\r\n \"bagging_fraction\" : 0.7, \r\n \"feature_fraction\" : 0.5,\r\n \"bagging_frequency\" : 5,\r\n \"bagging_seed\" : 2018,\r\n }\r\n\r\n\r\nlgtrain = lgbm.Dataset(X_train, label=y_train)\r\nlgval = lgbm.Dataset(X_test, label=y_test)\r\nlgb_model = lgbm.train(params, lgtrain, valid_sets=[lgval], num_boost_round=2000, early_stopping_rounds=100, verbose_eval=100)\r\n\r\npred_test = lgb_model.predict(X_test, num_iteration=lgb_model.best_iteration)\r\n\r\nfig, ax = plt.subplots(figsize=(8,12))\r\nlgbm.plot_importance(lgb_model, max_num_features=30, height=0.8, ax=ax)\r\nax.grid(False)\r\nplt.title(\"LightGBM - Feature Importance\", fontsize=15)\r\nplt.show()\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ngrid= {\"min_child_weight\":[4,5,6],\"max_depth\":[-1,1,3,5], \"learning_rate\":[0.1,0.01,0.2]}\r\nlgb=LGBMRegressor(random_state=96,objective='regression',metric='rmse')\r\ngridsearch= GridSearchCV(lgb,param_grid=grid,cv=5)\r\ngridsearch.fit(X_train, y_train)\r\nprint(gridsearch.best_score_)\r\nprint(gridsearch.best_params_)\r\n\r\nlgb= 
LGBMRegressor(objective='regression',metric='rmse',learning_rate=0.1,min_child_weight=4)\r\nlgb.fit(X_train, y_train)\r\nlgb_pred = lgb.predict(X_test)\r\naccuracy = lgb.score(X_test,y_test)\r\n'Accuracy: ' + str(np.round(accuracy*100, 2)) + '%'\r\nmean_absolute_error(y_test, lgb_pred)\r\nmean_squared_error(y_test, lgb_pred)\r\nnp.sqrt(mean_squared_error(y_test, lgb_pred))\r\n\r\ncoefs = pd.Series(lgb.feature_importances_, index = X_train.columns)\r\nimp_coefs = pd.concat([coefs.sort_values().head(10),coefs.sort_values().tail(10)])\r\nimp_coefs.plot(kind = \"barh\")\r\nplt.xlabel(\"LGB coefficient\", weight='bold')\r\nplt.title(\"Feature importance in the LightGB Model\", weight='bold')\r\nplt.show()\r\n\r\n###################################################\r\n\r\n#pip install bayesian-optimization\r\n\r\nfrom bayes_opt import BayesianOptimization\r\n\r\ndef lgb_eval(num_leaves, num_iterations, feature_fraction,learning_rate, bagging_fraction,bagging_frequency, max_depth, lambda_l1, lambda_l2, min_split_gain, min_child_weight): \r\n params = {'application':'regression_l2','metric':'rmse', 'early_stopping_round':100}\r\n \r\n params[\"num_leaves\"] = int(round(num_leaves))\r\n params[\"num_iterations\"] = int(num_iterations)\r\n params[\"learning_rate\"] = learning_rate\r\n params['feature_fraction'] = max(min(feature_fraction, 1), 0)\r\n params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)\r\n params['bagging_frequency'] = bagging_frequency \r\n params['max_depth'] = int(round(max_depth))\r\n params['lambda_l1'] = max(lambda_l1, 0)\r\n params['lambda_l2'] = max(lambda_l2, 0)\r\n params['min_split_gain'] = min_split_gain\r\n params['min_child_weight'] = min_child_weight\r\n \r\n dtrain = lgbm.Dataset(data=X_train, label=y_train, categorical_feature = categorical_features, free_raw_data=False)\r\n cv_result = lgbm.cv(params, dtrain,nfold=5, verbose_eval=200,stratified=False) \r\n \r\n #print(cv_result)\r\n # Bayesian optimization only knows how to maximize, not minimize, so return the negative RMSE\r\n return -1.0 * cv_result['rmse-mean'][-1]\r\n\r\n\r\nlgbBO = BayesianOptimization(lgb_eval, {'num_leaves': (30, 200),\r\n 'feature_fraction': (0.1, 0.9),\r\n 'learning_rate' : (0.0001,0.01),\r\n 'bagging_fraction': (0.8, 1),\r\n 'bagging_frequency':(5,10),\r\n 'num_iterations':(1000,5000),\r\n 'max_depth': (5, 10),\r\n 'lambda_l1': (0, 5),\r\n 'lambda_l2': (0, 3),\r\n 'min_split_gain': (0.001, 0.1),\r\n 'min_child_weight': (5, 50)}, random_state=0)\r\n\r\n# lgbBO.maximize(init_points=3, n_iter=5, acq='ei')\r\n\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"pandas.to_datetime",
"pandas.Series",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.mean_squared_error",
"numpy.round",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.log1p",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlabel"
]
] |
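The modelling section above ultimately fits an LGBMRegressor on label-encoded features against log1p(transactionRevenue) and scores with RMSE. A compact stand-in on synthetic data (assumes lightgbm and scikit-learn are installed; note the script's lgbm.train call passes early_stopping_rounds/verbose_eval directly, which newer LightGBM releases expect as callbacks instead):

import numpy as np
from lightgbm.sklearn import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# Random features against a log1p-scaled target, mirroring the script's
# np.sqrt(mean_squared_error(...)) evaluation.
rng = np.random.default_rng(10)
X = rng.random((500, 12))
y = np.log1p(rng.random(500) * 1e4)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=10)
model = LGBMRegressor(objective="regression", metric="rmse",
                      learning_rate=0.1, min_child_weight=4)
model.fit(X_tr, y_tr)
rmse = np.sqrt(mean_squared_error(y_te, model.predict(X_te)))
print(f"RMSE: {rmse:.4f}")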
CVPR2020/EnAET
|
[
"f490777980d20c68ca63764b7fc25537d7e72660"
] |
[
"Model/Classifier.py"
] |
[
"# /*******************************************************************************\n# * Author : CVPR2020_EnAET\n# *******************************************************************************/\nimport math\nimport torch.nn as nn\nimport numpy as np\nfrom Model.Basic_Block import Flatten,BasicBlock,GlobalAveragePooling\nfrom Model.Attention import Self_Attn\nfrom Model.TEBlock import TEBlock1\nclass Classifier(nn.Module):\n def __init__(self, _nChannels, _num_classes, _cls_type):\n super(Classifier, self).__init__()\n nChannels = _nChannels\n num_classes = _num_classes\n self.cls_type = _cls_type\n\n self.classifier = nn.Sequential()\n #first add a self attention part\n self.attention_module=Self_Attn(int(nChannels/8/8),'relu')\n\n if self.cls_type == 'MultLayer':\n nFeats = min(num_classes * 20, 2048)\n self.classifier.add_module('Flatten', Flatten())\n self.classifier.add_module('Liniear_1', nn.Linear(nChannels, nFeats, bias=False))\n self.classifier.add_module('BatchNorm_1', nn.BatchNorm1d(nFeats))\n self.classifier.add_module('ReLU_1', nn.ReLU(inplace=True))\n self.classifier.add_module('Liniear_2', nn.Linear(nFeats, nFeats, bias=False))\n self.classifier.add_module('BatchNorm2d', nn.BatchNorm1d(nFeats))\n self.classifier.add_module('ReLU_2', nn.ReLU(inplace=True))\n self.classifier.add_module('Liniear_F', nn.Linear(nFeats, num_classes))\n\n elif self.cls_type == 'MultLayerFC1':\n self.classifier.add_module('Batchnorm', nn.BatchNorm2d(nChannels / 8 / 8, affine=False))\n self.classifier.add_module('Flatten', Flatten())\n self.classifier.add_module('Liniear_F', nn.Linear(nChannels, num_classes))\n elif self.cls_type == 'MultLayerFC2':\n nFeats = min(num_classes * 20, 2048)\n self.classifier.add_module('Flatten', Flatten())\n self.classifier.add_module('Liniear_1', nn.Linear(nChannels, nFeats, bias=False))\n self.classifier.add_module('BatchNorm_1', nn.BatchNorm1d(nFeats))\n self.classifier.add_module('ReLU_1', nn.ReLU(inplace=True))\n self.classifier.add_module('Liniear_F', nn.Linear(nFeats, num_classes))\n\n elif self.cls_type == 'NIN_ConvBlock3':\n self.classifier.add_module('Block3_ConvB1', BasicBlock(nChannels, 192, 3))\n self.classifier.add_module('Block3_ConvB2', BasicBlock(192, 192, 1))\n self.classifier.add_module('Block3_ConvB3', BasicBlock(192, 192, 1))\n self.classifier.add_module('GlobalAvgPool', GlobalAveragePooling())\n self.classifier.add_module('Liniear_F', nn.Linear(192, num_classes))\n elif self.cls_type == 'Alexnet_conv5' or self.cls_type == 'Alexnet_conv4':\n if self.cls_type == 'Alexnet_conv4':\n block5 = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n )\n self.classifier.add_module('ConvB5', block5)\n self.classifier.add_module('Pool5', nn.MaxPool2d(kernel_size=3, stride=2))\n self.classifier.add_module('Flatten', Flatten())\n self.classifier.add_module('Linear1', nn.Linear(256 * 6 * 6, 4096, bias=False))\n self.classifier.add_module('BatchNorm1', nn.BatchNorm1d(4096))\n self.classifier.add_module('ReLU1', nn.ReLU(inplace=True))\n self.classifier.add_module('Liniear2', nn.Linear(4096, 4096, bias=False))\n self.classifier.add_module('BatchNorm2', nn.BatchNorm1d(4096))\n self.classifier.add_module('ReLU2', nn.ReLU(inplace=True))\n self.classifier.add_module('LinearF', nn.Linear(4096, num_classes))\n elif self.cls_type == 'l3layer_conv3':\n nChannels = 128\n nChannels1 = 256\n nChannels2 = 512\n self.classifier.add_module('Block3_ConvB1', TEBlock1(nChannels1, nChannels2, 3))#no padding\n 
self.classifier.add_module('Block3_ConvB2', TEBlock1(nChannels2, nChannels1, 1))\n self.classifier.add_module('Block3_ConvB3', TEBlock1(nChannels1, nChannels, 1))\n self.classifier.add_module('GlobalAvgPool', GlobalAveragePooling())\n self.classifier.add_module('Linear_F', nn.Linear(128, num_classes))\n else:\n raise ValueError('Not recognized classifier type: %s' % self.cls_type)\n\n self.initilize()\n\n def forward(self, feat,label):\n attention=None\n if label:\n feat,attention=self.attention_module(feat)\n feat=self.classifier(feat)\n return feat,attention\n def initilize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n if m.weight is not None:\n m.weight.data.fill_(1)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n fin = m.in_features\n fout = m.out_features\n std_val = np.sqrt(2.0 / fout)\n m.weight.data.normal_(0.0, std_val)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"numpy.sqrt",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
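The initialize() routine in Classifier above is a hand-rolled He/Kaiming-style scheme: convolution weights are drawn from N(0, sqrt(2/fan_out)), batch norms start at unit gamma and zero beta, and linear weights come from N(0, sqrt(2/out_features)). A condensed, self-contained restatement of that recipe, assuming only torch is installed (init_weights is an illustrative name, not from the repo):

# Standalone sketch of Classifier's weight-initialization recipe.
import math
import torch.nn as nn

def init_weights(module: nn.Module) -> None:
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            # He/Kaiming normal: std = sqrt(2 / fan_out)
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
        elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
            if m.weight is not None:
                m.weight.data.fill_(1)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            # fan-out-scaled normal for linear layers
            m.weight.data.normal_(0.0, math.sqrt(2.0 / m.out_features))
            if m.bias is not None:
                m.bias.data.fill_(0.0)

# usage: init_weights(model) right after constructing the network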
jeffhsu3/tf-bind-transformer
|
[
"18e95c51b654ea1876a303565532b20a0a386233"
] |
[
"tf_bind_transformer/training_utils.py"
] |
[
"import torch\nfrom torch import nn\nfrom tf_bind_transformer.optimizer import get_optimizer\nfrom tf_bind_transformer.data import read_bed, collate_dl_outputs, get_dataloader, remap_df_add_experiment_target_cell\nfrom tf_bind_transformer.data import RemapAllPeakDataset, NegativePeakDataset, ScopedNegativePeakDataset\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\n# helpers for logging and accumulating values across gradient steps\n\ndef accum_log(log, new_logs):\n for key, new_value in new_logs.items():\n old_value = log.get(key, 0.)\n log[key] = old_value + new_value\n return log\n\n# simple Trainer class\n\nclass Trainer(nn.Module):\n def __init__(\n self,\n model,\n *,\n remap_bed_file,\n negative_bed_file,\n factor_fasta_folder,\n fasta_file,\n train_chromosome_ids,\n valid_chromosome_ids,\n batch_size,\n context_length,\n lr = 3e-4,\n wd = 0.1,\n validate_every = 250,\n grad_clip_norm = None,\n grad_accum_every = 1,\n held_out_targets = [],\n held_out_cell_types = [],\n exclude_targets = [],\n exclude_cell_types = [],\n shuffle = False,\n train_sample_frac = 1.,\n valid_sample_frac = 1.,\n remap_sample_frac = 1.,\n shift_aug_range = (-2, 2),\n rc_aug = False,\n experiments_json_path = None,\n read_value_aux_loss = False,\n checkpoint_filename = './checkpoint.pt',\n include_scoped_negs = False,\n scoped_negs_remap_bed_path = None,\n scoped_negs_path = None,\n scoped_negs_exts = '.bed.bool.npy',\n include_biotypes_metadata_in_context = False,\n biotypes_metadata_path = None,\n include_biotypes_metadata_columns = ['germ_layer', 'cellline_cat'],\n biotypes_metadata_delimiter = ' | ',\n balance_sampling_by_target = True,\n valid_balance_sampling_by_target = None,\n ):\n super().__init__()\n self.model = model\n valid_balance_sampling_by_target = default(valid_balance_sampling_by_target, balance_sampling_by_target)\n\n remap_df = read_bed(remap_bed_file)\n\n if remap_sample_frac < 1:\n remap_df = remap_df.sample(frac = remap_sample_frac)\n\n remap_df = remap_df_add_experiment_target_cell(remap_df)\n\n neg_df = read_bed(negative_bed_file)\n\n self.ds = RemapAllPeakDataset(\n remap_df = remap_df,\n fasta_file = fasta_file,\n factor_fasta_folder = factor_fasta_folder,\n filter_chromosome_ids = train_chromosome_ids,\n exclude_targets = [*held_out_targets, *exclude_targets],\n exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],\n context_length = context_length,\n remap_df_frac = train_sample_frac,\n shift_augs = shift_aug_range,\n rc_aug = rc_aug,\n experiments_json_path = experiments_json_path,\n include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,\n biotypes_metadata_path = biotypes_metadata_path,\n include_biotypes_metadata_columns = include_biotypes_metadata_columns,\n biotypes_metadata_delimiter = biotypes_metadata_delimiter,\n balance_sampling_by_target = balance_sampling_by_target\n )\n\n self.neg_ds = NegativePeakDataset(\n remap_df = remap_df,\n negative_df = neg_df,\n fasta_file = fasta_file,\n factor_fasta_folder = factor_fasta_folder,\n filter_chromosome_ids = train_chromosome_ids,\n exclude_targets = [*held_out_targets, *exclude_targets],\n exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],\n context_length = context_length,\n experiments_json_path = experiments_json_path,\n include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,\n biotypes_metadata_path = biotypes_metadata_path,\n include_biotypes_metadata_columns = 
include_biotypes_metadata_columns,\n biotypes_metadata_delimiter = biotypes_metadata_delimiter,\n balance_sampling_by_target = balance_sampling_by_target\n )\n\n self.valid_ds = RemapAllPeakDataset(\n remap_df = remap_df,\n fasta_file = fasta_file,\n factor_fasta_folder = factor_fasta_folder,\n include_targets = held_out_targets,\n include_cell_types = held_out_cell_types,\n exclude_targets = exclude_targets,\n exclude_cell_types = exclude_cell_types,\n filter_chromosome_ids = valid_chromosome_ids,\n context_length = context_length,\n remap_df_frac = valid_sample_frac,\n shift_augs = shift_aug_range,\n rc_aug = rc_aug,\n experiments_json_path = experiments_json_path,\n include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,\n biotypes_metadata_path = biotypes_metadata_path,\n include_biotypes_metadata_columns = include_biotypes_metadata_columns,\n biotypes_metadata_delimiter = biotypes_metadata_delimiter,\n balance_sampling_by_target = valid_balance_sampling_by_target\n )\n\n self.valid_neg_ds = NegativePeakDataset(\n remap_df = remap_df,\n negative_df = neg_df,\n fasta_file = fasta_file,\n factor_fasta_folder = factor_fasta_folder,\n filter_chromosome_ids = valid_chromosome_ids,\n include_targets = held_out_targets,\n include_cell_types = held_out_cell_types,\n exclude_targets = exclude_targets,\n exclude_cell_types = exclude_cell_types,\n context_length = context_length,\n experiments_json_path = experiments_json_path,\n include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,\n biotypes_metadata_path = biotypes_metadata_path,\n include_biotypes_metadata_columns = include_biotypes_metadata_columns,\n biotypes_metadata_delimiter = biotypes_metadata_delimiter,\n balance_sampling_by_target = valid_balance_sampling_by_target\n )\n\n self.include_scoped_negs = include_scoped_negs\n\n self.dl = get_dataloader(self.ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)\n self.neg_dl = get_dataloader(self.neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)\n\n if include_scoped_negs:\n self.scoped_neg_ds = ScopedNegativePeakDataset(\n fasta_file = fasta_file,\n factor_fasta_folder = factor_fasta_folder,\n numpy_folder_with_scoped_negatives = scoped_negs_path,\n remap_bed_file = scoped_negs_remap_bed_path,\n exts = scoped_negs_exts,\n exclude_targets = [*held_out_targets, *exclude_targets],\n exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],\n filter_chromosome_ids = train_chromosome_ids,\n include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,\n biotypes_metadata_path = biotypes_metadata_path,\n include_biotypes_metadata_columns = include_biotypes_metadata_columns,\n biotypes_metadata_delimiter = biotypes_metadata_delimiter,\n balance_sampling_by_target = balance_sampling_by_target\n )\n\n self.scoped_neg_dl = get_dataloader(self.scoped_neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)\n\n self.valid_dl = get_dataloader(self.valid_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)\n self.valid_neg_dl = get_dataloader(self.valid_neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)\n\n self.aux_read_value_loss = model.aux_read_value_loss\n\n if self.aux_read_value_loss:\n print(f'training with read value aux loss')\n\n self.optim = get_optimizer(model.parameters(), lr = lr, wd = wd)\n\n self.grad_accum_every = grad_accum_every\n self.grad_clip_norm = grad_clip_norm\n\n self.validate_every = validate_every\n 
self.register_buffer('steps', torch.Tensor([0.]))\n\n self.checkpoint_filename = checkpoint_filename\n\n def forward(\n self,\n finetune_enformer_ln_only = True,\n **kwargs\n ):\n grad_accum_every = self.grad_accum_every\n curr_step = int(self.steps.item())\n self.model.train()\n\n log = {}\n\n for _ in range(self.grad_accum_every):\n dl_outputs = [next(self.dl), next(self.neg_dl)]\n\n if self.include_scoped_negs:\n dl_outputs.append(next(self.scoped_neg_dl))\n\n seq, tf_aa, contextual_texts, peaks_nr, read_value, binary_target = collate_dl_outputs(*dl_outputs)\n seq, binary_target, read_value, peaks_nr = seq.cuda(), binary_target.cuda(), read_value.cuda(), peaks_nr.cuda()\n\n loss, aux_loss = self.model(\n seq,\n target = binary_target,\n aa = tf_aa,\n contextual_free_text = contextual_texts,\n finetune_enformer_ln_only = finetune_enformer_ln_only,\n read_value = read_value,\n peaks_nr = peaks_nr,\n **kwargs\n )\n\n total_loss = self.model.combine_losses(loss, aux_loss)\n\n log = accum_log(log, {\n 'loss': loss.item() / grad_accum_every,\n 'aux_loss': aux_loss.item() / grad_accum_every,\n 'total_loss': total_loss.item() / grad_accum_every\n })\n\n (total_loss / self.grad_accum_every).backward()\n\n print(f'{curr_step} loss: {log[\"total_loss\"]}')\n\n if exists(self.grad_clip_norm):\n nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip_norm)\n\n self.optim.step()\n self.optim.zero_grad()\n\n if (curr_step % self.validate_every) == 0:\n self.model.eval()\n\n for _ in range(self.grad_accum_every):\n seq, tf_aa, contextual_texts, peaks_nr, read_value, binary_target = collate_dl_outputs(next(self.valid_dl), next(self.valid_neg_dl))\n seq, binary_target = seq.cuda(), binary_target.cuda()\n\n valid_logits = self.model(\n seq,\n aa = tf_aa,\n contextual_free_text = contextual_texts,\n )\n\n valid_loss = self.model.loss_fn(valid_logits, binary_target.float())\n valid_accuracy = ((valid_logits.sigmoid() > 0.5).int() == binary_target).sum() / (binary_target.numel())\n\n log = accum_log(log, {\n 'valid_loss': valid_loss.item() / grad_accum_every,\n 'valid_accuracy': valid_accuracy.item() / grad_accum_every\n })\n\n print(f'{curr_step} valid loss: {log[\"valid_loss\"]}')\n print(f'{curr_step} valid accuracy: {log[\"valid_accuracy\"]}')\n\n if curr_step > 0:\n torch.save(self.model.state_dict(), self.checkpoint_filename)\n\n self.steps += 1\n return log\n"
] |
[
[
"torch.Tensor"
]
] |
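Trainer.forward() above interleaves gradient accumulation with running-log bookkeeping: each micro-batch contributes loss / grad_accum_every both to the backward pass and to the log dict via accum_log, and the optimizer steps once per window. A minimal sketch of that pattern with a stand-in model and random data (the model, optimizer, and tensor shapes here are illustrative, not from the repo):

# Gradient accumulation with accum_log-style bookkeeping, as in Trainer.forward().
import torch
from torch import nn

def accum_log(log, new_logs):  # same helper as in training_utils.py
    for key, new_value in new_logs.items():
        log[key] = log.get(key, 0.) + new_value
    return log

model = nn.Linear(8, 1)
optim = torch.optim.AdamW(model.parameters(), lr=3e-4)
grad_accum_every = 4

log = {}
for _ in range(grad_accum_every):
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    loss = nn.functional.mse_loss(model(x), y)
    log = accum_log(log, {'loss': loss.item() / grad_accum_every})
    (loss / grad_accum_every).backward()  # gradients sum across the window

optim.step()
optim.zero_grad()
print(log['loss'])  # average loss over the accumulated micro-batches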
Aaron506/caltech-ee148-spring2020-hw02
|
[
"aefe6755b9210f70f2983ec5c046f60ff12c5535"
] |
[
"run_predictions.py"
] |
[
"import os\nimport numpy as np\nimport json\nfrom PIL import Image, ImageDraw, ImageFont\n\n# Aaron added\nimport matplotlib.pyplot as plt\nimport pdb\nfrom numba import jit\n# Only used to provide a standard data structure to speed up clustering\nfrom unionfind import unionfind\n\nif __name__ == '__main__':\n plt.close('all')\n \n ### User-Defined Constants\n ACCEPT = 0.5\n\ndef visualize(I, output, disp=False):\n im = Image.fromarray(I)\n draw = ImageDraw.Draw(im)\n for box in output:\n y0, x0, y1, x1, score = box\n draw.rectangle([x0, y0, x1, y1])\n fnt = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text((max(x0-15,0),max(y0-15, 0)), str(score)[:3], font=fnt, fill=(255,255,255))\n if disp:\n im.show()\n return np.asarray(im)\n\n# Aaron: Load the templates to be used by the modified match filter\ndef loadTemplates():\n kernels = []\n I = np.array(Image.open('data/RedLights2011_Medium/RL-155.jpg')) \n kernels.append(I[323:345, 206:215, :])\n # Comment out for weak approach\n # kernels.append(I[287:314, 347:356, :])\n # kernels.append(I[332:352, 425:434, :])\n # I = np.array(Image.open('data/RedLights2011_Medium/RL-259.jpg')) \n # kernels.append(I[220:236, 269:281, :])\n # kernels.append(I[222:238, 315:321, :])\n # I = np.array(Image.open('data/RedLights2011_Medium/RL-062.jpg'))\n # # kernels.append(I[239:254, 157:162, :])\n # kernels.append(I[196:223, 463:475, :])\n # # kernels.append(I[242:251, 349:353, :])\n return kernels\n\ndef findEdges(mask, disp=False):\n edgeMask = mask.copy()\n # Classify labeled pixel as edge if it has <= 3 neighbors in \n # cardinal directions\n numNeighbors = edgeMask[1:-1,2:].astype(int) + edgeMask[1:-1,:-2].astype(int) + edgeMask[:-2,1:-1].astype(int) + edgeMask[2:,1:-1].astype(int)\n newNeighbors = np.zeros(numNeighbors.shape)\n \n # rnd = 0\n while np.sum(np.abs(newNeighbors - numNeighbors)) > 0:\n # print('On iteration', rnd)\n numNeighbors = newNeighbors\n padded = np.zeros(np.shape(edgeMask))\n padded[1:-1,1:-1] = (numNeighbors == 4)\n # Fill in any pixel that is surrounded\n edgeMask += padded.astype(bool) \n # Repeat now that filled in holes\n newNeighbors = edgeMask[1:-1,2:].astype(int) + edgeMask[1:-1,:-2].astype(int) + edgeMask[:-2,1:-1].astype(int) + edgeMask[2:,1:-1].astype(int)\n # rnd += 1\n \n padded = np.ones(np.shape(edgeMask)) # Enable all pixels on perimeter edge\n padded[1:-1,1:-1] = (newNeighbors <= 3) \n edgeMask = padded * edgeMask \n \n if disp:\n plt.figure()\n plt.imshow(edgeMask)\n \n return np.transpose(np.where(edgeMask)), edgeMask \n\ndef findRed(I, disp=False):\n im = Image.fromarray(I)\n hsv = np.asarray(im.convert('HSV'))\n\n hue = (hsv[:,:,0] < 50) + (hsv[:,:,0] > 240) > 0\n sat = (hsv[:,:,1] > 130)\n value = (hsv[:,:,2] > 130)\n \n mask = hue * sat * value\n if disp:\n im = Image.fromarray(mask * 255)\n # im.show()\n plt.figure()\n plt.imshow(np.array(im))\n return np.transpose(np.where(mask)), mask\n\n# def heatmapCluster(heatmap, similarityThresh):\n# # Left to right\n# # Up to down\n# # Take the larger of the two as the edge\n \n# original = heatmap[1:-1, 1:-1]\n# right = heatmap[1:-1, 2:]\n# left = heatmap[1:-1, :-2]\n# up = heatmap[:-2, 1:-1]\n# down = heatmap[2:, 1:-1]\n \n# edges = (original - right > similarityThresh) + \\\n# (original - left > similarityThresh) + \\\n# (original - up > similarityThresh) + \\\n# (original - down > similarityThresh)\n \n# pdb.set_trace()\n \n# Sliding window clustering where thresh dictates square window size\n# mask should give \ndef clusterPixels(pixels, mask, thresh):\n \n 
pixelMap = {tuple(pixels[i]) : i for i in range(len(pixels))} \n \n u = unionfind(len(pixels))\n for i in range(mask.shape[0] - thresh):\n for j in range(mask.shape[1] - thresh):\n group = np.transpose(np.where(mask[i:i+thresh, j:j+thresh]))\n for k in range(len(group)):\n for l in range(k):\n try:\n u.unite(pixelMap[tuple(np.array([i,j]) + group[k])], pixelMap[tuple(np.array([i,j]) + group[l])])\n except:\n pdb.set_trace()\n groups = u.groups()\n \n clusters = [[pixels[i,:] for i in group] for group in groups]\n return clusters\n\n@jit\n# Aaron: For simplicity remove stride\ndef compute_convolution(I, T):\n '''\n This function takes an image <I> and a template <T> (both numpy arrays) \n and returns a heatmap where each grid represents the output produced by \n convolution at each location. You can add optional parameters (e.g. stride, \n window_size, padding) to create additional functionality. \n '''\n (n_rows,n_cols,n_channels) = np.shape(I)\n \n ### Aaron: Added code for computing convolution ###\n kernel = T.astype(np.float32)\n K = kernel.flatten()\n K /= np.sqrt(np.sum(kernel * kernel)) # Normalize both kernel and ultimately image patch\n \n paddedIm = np.ones((I.shape[0] + kernel.shape[0]-1, I.shape[1] + kernel.shape[1]-1, 3))\n padSize = (int((kernel.shape[0]-1)/2), int((kernel.shape[1]-1)/2))\n for i in range(3):\n paddedIm[:,:,i] *= np.mean(I[:,:,i])\n paddedIm[padSize[0]:I.shape[0] + padSize[0], padSize[1]:I.shape[1] + padSize[1], :] = I\n \n # Now, do sliding window\n response = np.zeros((paddedIm.shape[0] - kernel.shape[0] + 1, paddedIm.shape[1] - kernel.shape[1] + 1))\n \n for i in range(response.shape[0]):\n for j in range(response.shape[1]):\n imPatch = (paddedIm[i:i+kernel.shape[0],j:j+kernel.shape[1],:]).astype(np.float32).flatten()\n imPatch /= np.sqrt(np.sum(imPatch * imPatch))\n response[i,j] = np.sum(imPatch * K)\n \n assert response.shape[0] == n_rows\n assert response.shape[1] == n_cols\n \n return response\n\ndef predict_boxes(I, heatmap, ACCEPT):\n '''\n This function takes heatmap and returns the bounding boxes and associated\n confidence scores.\n '''\n output = []\n \n pixels, redMask = findRed(I, False)\n\n # Consider the intersection of where heatmap is red and also sufficiently\n # close to template\n mask = redMask * (heatmap > ACCEPT)\n \n pixels, edgeMask = findEdges(mask)\n\n # 3 is cluster dist threshold\n clusters = clusterPixels(pixels, edgeMask, 3) \n\n # Remove clusters which are too small or too large\n clusters = [cluster for cluster in clusters if len(cluster) > 5 and len(cluster) < 120]\n \n im = Image.fromarray((heatmap * 255).astype(np.uint8))\n\n costs = []\n circles = []\n for cluster in clusters:\n pixels = np.array(cluster) # Nx2 now\n # Use center of mass as the center, look at how much distance of points\n # from center fluctuates, if too high then reject\n cm = np.mean(pixels, axis=0)\n radii = np.linalg.norm(pixels - cm, axis=1)\n r = np.mean(radii)\n cost = np.std(radii) / r # Fractional uncertainty/error\n \n costs.append(cost)\n \n y, x = cm\n circles.append([x, y, r])\n draw = ImageDraw.Draw(im)\n draw.point([(x-2, y), (x-1, y), (x, y), (x+1, y), (x+2, y), (x, y-2), (x, y-1), (x, y+1), (x, y+2)])\n draw.ellipse((x-r, y-r, x+r, y+r))\n \n # Use fixed aspect ratio for traffic light\n xK, yK, rK = (13, 13, 7)\n hK, wK = 28, 28 # Use a square bounding box to match annotations\n scale = r / rK \n xK = int(scale * xK)\n yK = int(scale * yK)\n hK = int(scale * hK)\n wK = int(scale * wK)\n start = (int(y - yK), int(x - xK))\n \n # Need to 
account for partially occluded traffic light\n tl_row = max(start[0], 0)\n tl_col = max(start[1], 0)\n br_row = min(start[0]+ hK, I.shape[0])\n br_col = min(start[1]+ wK, I.shape[1])\n \n # Use heuristic combination of highest contained response value and \n # circle cost to dictate confidence\n # Response value ~ 0.8 and cost ~ 0.3 to give rough sense\n score = np.clip(np.max(heatmap[tl_row:br_row, tl_col:br_col]) - 1/2 * cost, 0, 1)\n\n box = [tl_row,tl_col,br_row,br_col,score]\n output.append(box)\n \n draw = ImageDraw.Draw(im)\n y0, x0, y1, x1, _ = box\n draw.rectangle([x0, y0, x1, y1])\n \n fnt = ImageFont.truetype(\"arial.ttf\", 10)\n \n draw.text((max(x0-15,0),max(y0-15, 0)), str(score)[:3], font=fnt, fill=0)\n \n return output, np.array(im), redMask, edgeMask\n\ndef detect_red_light_mf(I, name, disp=False):\n '''\n This function takes a numpy array <I> and returns a list <output>.\n The length of <output> is the number of bounding boxes predicted for <I>. \n Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. \n The first four entries are four integers specifying a bounding box \n (the row and column index of the top left corner and the row and column \n index of the bottom right corner).\n <score> is a confidence score ranging from 0 to 1. \n\n Note that PIL loads images in RGB order, so:\n I[:,:,0] is the red channel\n I[:,:,1] is the green channel\n I[:,:,2] is the blue channel\n '''\n \n # Visualize the initial image\n if disp:\n plt.figure()\n plt.imshow(I)\n \n # Create a new folder\n fname = preds_path + '/' + name\n os.makedirs(fname, exist_ok=True)\n \n kernels = loadTemplates()\n \n heatmaps = []\n for kernel in kernels:\n heatmaps.append(compute_convolution(I, kernel))\n heatmap = np.max(heatmaps, axis=0) # maxpool\n \n # Visualize the output response\n if disp:\n plt.figure()\n plt.imshow(heatmap, cmap='gray')\n \n Image.fromarray((heatmap * 255).astype(np.uint8)).save(fname + '/heatmap.jpg', quality=95)\n output, visualizedIm, redMask, edgeMask = predict_boxes(I, heatmap, ACCEPT)\n\n Image.fromarray(visualizedIm).save(fname + '/boxedHeatmap.jpg')\n \n Image.fromarray(visualize(I, output).astype(np.uint8)).save(fname + '/ogBoxed.jpg', quality=95)\n \n Image.fromarray((redMask * 255).astype(np.uint8)).save(fname + '/redMask.jpg', quality=95)\n \n Image.fromarray((edgeMask * 255).astype(np.uint8)).save(fname + '/edgeMask.jpg', quality=95)\n\n '''\n END YOUR CODE\n '''\n\n for i in range(len(output)):\n assert len(output[i]) == 5\n assert (output[i][4] >= 0.0) and (output[i][4] <= 1.0)\n\n return output\n\n# Note that you are not allowed to use test data for training.\n# set the path to the downloaded data:\ndata_path = 'data/RedLights2011_Medium'\n\n# load splits: \nsplit_path = 'data/hw02_splits'\nfile_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))\nfile_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))\n\n# set a path for saving predictions:\npreds_path = 'data/hw02_preds'\nos.makedirs(preds_path, exist_ok=True) # create directory if needed\n\n# Set this parameter to True when you're done with algorithm development:\ndone_tweaking = True\n\n#### TODO: REMOVE ####\n# Used RL-155, RL-259, RL-062 traffic light templates\n# file_names_train = ['RL-062.jpg']\n# file_names_train = [] # Temporary so skip right to test\n\n'''\nMake predictions on the training set.\n'''\npreds_train = {}\nfor i in range(len(file_names_train)):\n if i % 10 == 0:\n print('Train progress = ' + str(i/len(file_names_train)))\n \n # read 
image using PIL:\n I = Image.open(os.path.join(data_path,file_names_train[i]))\n\n # convert to numpy array:\n I = np.asarray(I)\n\n preds_train[file_names_train[i]] = detect_red_light_mf(I, file_names_train[i][:-4])\n\n# save preds (overwrites any previous predictions!)\nwith open(os.path.join(preds_path,'preds_train.json'),'w') as f:\n json.dump(preds_train,f)\n\nif done_tweaking:\n '''\n Make predictions on the test set. \n '''\n preds_test = {}\n for i in range(len(file_names_test)):\n if i % 10 == 0:\n print('Test progress = ' + str(i/len(file_names_test)))\n # read image using PIL:\n I = Image.open(os.path.join(data_path,file_names_test[i]))\n\n # convert to numpy array:\n I = np.asarray(I)\n\n preds_test[file_names_test[i]] = detect_red_light_mf(I, file_names_test[i][:-4])\n\n # save preds (overwrites any previous predictions!)\n with open(os.path.join(preds_path,'preds_test.json'),'w') as f:\n json.dump(preds_test,f)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.ones",
"numpy.max",
"numpy.std",
"numpy.shape",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.where",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
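compute_convolution() above is a normalized matched filter: both the template and every image patch are scaled to unit L2 norm before the dot product, so each response is a cosine similarity and a perfect match scores 1.0. A minimal grayscale sketch of the same idea (no mean-padding, so the output is "valid"-size rather than same-size as in the file; names are illustrative):

# Normalized cross-correlation (cosine-similarity) template matching, pure numpy.
import numpy as np

def matched_filter(I, T):
    h, w = T.shape
    K = T.flatten().astype(np.float64)
    K /= np.linalg.norm(K)                       # unit-norm template
    out = np.zeros((I.shape[0] - h + 1, I.shape[1] - w + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            patch = I[i:i+h, j:j+w].flatten().astype(np.float64)
            out[i, j] = patch @ K / np.linalg.norm(patch)  # cosine similarity
    return out

I = np.random.rand(16, 16)
T = I[4:9, 6:11].copy()                          # cut a template out of the image
resp = matched_filter(I, T)
print(np.unravel_index(resp.argmax(), resp.shape))  # -> (4, 6), the template location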
aliayub7/CBCL
|
[
"23b92dc4323ccc97984905e616fc26a33e387e52"
] |
[
"main_file.py"
] |
[
"\"\"\"\nComplete CBCL code\n\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\nimport pickle\nfrom multiprocessing import Pool\nfrom sklearn.model_selection import train_test_split\nfrom Functions import get_centroids\nfrom Functions import check_reduce_centroids\nfrom Functions import get_test_accuracy\nfrom get_incremental_data import getIncrementalData\nimport random\n\nseed = random.randint(0,1000)\nrandom.seed(seed)\nnp.random.seed(seed)\n\nwith open('CIFAR_Resnet34_train_features.data', 'rb') as filehandle:\n train_features = pickle.load(filehandle)\nwith open('CIFAR_Resnet34_test_features.data', 'rb') as filehandle:\n test_features = pickle.load(filehandle)\nwith open('CIFAR_Resnet34_train_labels.data', 'rb') as filehandle:\n train_labels = pickle.load(filehandle)\nwith open('CIFAR_Resnet34_test_labels.data', 'rb') as filehandle:\n test_labels = pickle.load(filehandle)\n\ndistance_metric = 'euclidean'\nclustering_type = 'Agg_Var'\nfull_classes = 100\ntotal_classes = 2\nk_shot = 10\ntotal_centroids_limit = 7500\ncurrent_total_centroids = 0\ndistance_threshold = 17\nk = 1\n\nfor iterations in range(0,1):\n # get incremental data\n incremental_data_creator = getIncrementalData(train_features,train_labels,test_features,test_labels,full_classes=full_classes,seed=seed)\n incremental_data_creator.incremental_data(total_classes=total_classes,limiter=full_classes)\n train_features_increment = incremental_data_creator.train_features_increment\n train_labels_increment = incremental_data_creator.train_labels_increment\n test_features_increment = incremental_data_creator.test_features_increment\n test_labels_increment = incremental_data_creator.test_labels_increment\n\n complete_x_test = []\n complete_y_test = []\n complete_centroids = []\n complete_centroid_labels = []\n total_num = []\n full_total_num = []\n\n # for the complete number of increments cluster and test\n for increment in range(0,int(100/total_classes)):\n total_num.extend([0 for y in range(total_classes)])\n x_test = test_features_increment[increment]\n y_test = test_labels_increment[increment]\n\n # get some random k_shot images for each class\n x_train_2,x_test_2,y_train_2,y_test_2 = train_test_split(train_features_increment[increment],train_labels_increment[increment],test_size=1)\n total_num_temp = [0 for x in range(0,total_classes)]\n x_train_increment = []\n y_train_increment = []\n for i in range(0,len(y_train_2)):\n if total_num_temp[y_train_2[i]-(increment*total_classes)]<k_shot:\n total_num_temp[y_train_2[i]-(increment*total_classes)]+=1\n x_train_increment.append(x_train_2[i])\n y_train_increment.append(y_train_2[i])\n print ('number of training images',len(y_train_increment))\n\n ### CLUSTERING PHASE ###\n x_train_increment = train_features_increment[increment]\n y_train_increment = train_labels_increment[increment]\n train_data = [[] for y in range(total_classes)]\n for i in range(0,len(y_train_increment)):\n train_data[y_train_increment[i]-(increment*total_classes)].append(x_train_increment[i])\n total_num[y_train_increment[i]]+=1\n weighting = np.divide([1 for x in range(0,(len(total_num)))],total_num)\n weighting = np.divide(weighting,np.sum(weighting))\n\n centroids = [[[0 for x in range(len(x_train_increment[0]))]] for y in range(total_classes)]\n train_data = [[] for y in range(total_classes)]\n for i in range(0,len(y_train_increment)):\n train_data[y_train_increment[i]-(increment*total_classes)].append(x_train_increment[i])\n\n # for multiprocessing\n train_pack = []\n for i in range(0,total_classes):\n 
train_pack.append([train_data[i], distance_threshold, clustering_type])\n        my_pool = Pool(total_classes)\n        centroids = my_pool.map(get_centroids, train_pack)\n        my_pool.close()\n        exp_centroids = 0\n        for i in range(0, len(centroids)):\n            exp_centroids += len(centroids[i])\n\n        # remove centroids to keep total centroids in limit\n        complete_centroids = check_reduce_centroids(complete_centroids, current_total_centroids, exp_centroids, total_centroids_limit, increment, total_classes)\n        complete_centroids.extend(centroids)\n        total_centroids = 0\n        for i in range(0, len(complete_centroids)):\n            total_centroids += len(complete_centroids[i])\n        current_total_centroids = total_centroids\n\n        ### TESTING PHASE ###\n        complete_x_test.extend(x_test)\n        complete_y_test.extend(y_test)\n\n        test_pack = [complete_x_test, complete_y_test, complete_centroids, k, total_classes + (increment * total_classes), weighting]\n        test_accuracy = get_test_accuracy(test_pack)\n        print(\"test_accuracy\", test_accuracy)\n"
] |
[
[
"numpy.sum",
"sklearn.model_selection.train_test_split",
"numpy.random.seed"
]
] |
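The script above delegates to get_centroids, check_reduce_centroids, and get_test_accuracy from the repo's Functions.py, which are not shown here. Prediction in CBCL is centroid-based: a test point is scored against each class's set of centroids, with the per-class weighting compensating for class imbalance. A minimal nearest-centroid sketch of that idea (exactly how weighting enters the real scoring is a Functions.py detail; the inverse-distance form below is only an assumed illustration):

# Weighted nearest-centroid prediction, sketched with numpy only.
import numpy as np

def predict(x, centroids, weighting):
    # centroids: one [n_centroids_c, dim] array per class
    dists = np.array([np.linalg.norm(c - x, axis=1).min() for c in centroids])
    scores = weighting / (dists + 1e-12)   # closer centroid => higher score (assumed form)
    return int(np.argmax(scores))

rng = np.random.default_rng(0)
centroids = [rng.normal(loc=c, size=(5, 4)) for c in (0.0, 3.0)]  # two toy classes
weighting = np.array([0.5, 0.5])
print(predict(np.full(4, 3.0), centroids, weighting))  # -> 1 (nearest to the second class)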
LaudateCorpus1/llvm-project-staging
|
[
"cc926dc3a87af7023aa9b6c392347a0a8ed6949b"
] |
[
"mlir/test/python/dialects/sparse_tensor/test_SpMM.py"
] |
[
"# RUN: SUPPORT_LIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s\n\nimport ctypes\nimport numpy as np\nimport os\n\nimport mlir.all_passes_registration\n\nfrom mlir import ir\nfrom mlir import runtime as rt\nfrom mlir import execution_engine\nfrom mlir import passmanager\n\nfrom mlir.dialects import sparse_tensor as st\nfrom mlir.dialects import builtin\nfrom mlir.dialects.linalg.opdsl import lang as dsl\n\n\ndef run(f):\n print('\\nTEST:', f.__name__)\n f()\n return f\n\n\[email protected]_structured_op\ndef matmul_dsl(\n A=dsl.TensorDef(dsl.T, dsl.S.M, dsl.S.K),\n B=dsl.TensorDef(dsl.T, dsl.S.K, dsl.S.N),\n C=dsl.TensorDef(dsl.T, dsl.S.M, dsl.S.N, output=True)):\n C[dsl.D.m, dsl.D.n] += A[dsl.D.m, dsl.D.k] * B[dsl.D.k, dsl.D.n]\n\n\ndef build_SpMM(attr: st.EncodingAttr):\n \"\"\"Build SpMM kernel.\n\n This method generates a linalg op with for matrix multiplication using\n just the Python API. Effectively, a generic linalg op is constructed\n that computes C(i,j) += A(i,k) * B(k,j) for annotated matrix A.\n \"\"\"\n module = ir.Module.create()\n f64 = ir.F64Type.get()\n a = ir.RankedTensorType.get([3, 4], f64, attr)\n b = ir.RankedTensorType.get([4, 2], f64)\n c = ir.RankedTensorType.get([3, 2], f64)\n arguments = [a, b, c]\n with ir.InsertionPoint(module.body):\n\n @builtin.FuncOp.from_py_func(*arguments)\n def spMxM(*args):\n return matmul_dsl(args[0], args[1], outs=[args[2]])\n\n return module\n\n\ndef boilerplate(attr: st.EncodingAttr):\n \"\"\"Returns boilerplate main method.\n\n This method sets up a boilerplate main method that takes three tensors\n (a, b, c), converts the first tensor a into s sparse tensor, and then\n calls the sparse kernel for matrix multiplication. For convenience,\n this part is purely done as string input.\n \"\"\"\n return f\"\"\"\nfunc @main(%ad: tensor<3x4xf64>, %b: tensor<4x2xf64>, %c: tensor<3x2xf64>) -> tensor<3x2xf64>\n attributes {{ llvm.emit_c_interface }} {{\n %a = sparse_tensor.convert %ad : tensor<3x4xf64> to tensor<3x4xf64, {attr}>\n %0 = call @spMxM(%a, %b, %c) : (tensor<3x4xf64, {attr}>,\n tensor<4x2xf64>,\n tensor<3x2xf64>) -> tensor<3x2xf64>\n return %0 : tensor<3x2xf64>\n}}\n\"\"\"\n\n\ndef build_compile_and_run_SpMM(attr: st.EncodingAttr, support_lib: str,\n compiler):\n # Build.\n module = build_SpMM(attr)\n func = str(module.operation.regions[0].blocks[0].operations[0].operation)\n module = ir.Module.parse(func + boilerplate(attr))\n\n # Compile.\n compiler(module)\n engine = execution_engine.ExecutionEngine(\n module, opt_level=0, shared_libs=[support_lib])\n\n # Set up numpy input and buffer for output.\n a = np.array(\n [[1.1, 0.0, 0.0, 1.4], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.3, 0.0]],\n np.float64)\n b = np.array([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0]], np.float64)\n c = np.zeros((3, 2), np.float64)\n out = np.zeros((3, 2), np.float64)\n\n mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))\n mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))\n mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))\n mem_out = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(out)))\n\n # Invoke the kernel and get numpy output.\n # Built-in bufferization uses in-out buffers.\n # TODO: replace with inplace comprehensive bufferization.\n engine.invoke('main', mem_out, mem_a, mem_b, mem_c)\n\n # Sanity check on computed result.\n expected = np.matmul(a, b);\n c = rt.ranked_memref_to_numpy(mem_out[0])\n if np.allclose(c, 
expected):\n pass\n else:\n quit(f'FAILURE')\n\n\nclass SparseCompiler:\n \"\"\"Sparse compiler passes.\"\"\"\n\n def __init__(self, options: str):\n pipeline = (\n f'sparsification{{{options}}},'\n f'sparse-tensor-conversion,'\n f'builtin.func(convert-linalg-to-loops,convert-vector-to-scf),'\n f'convert-scf-to-std,'\n f'func-bufferize,'\n f'tensor-constant-bufferize,'\n f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'\n f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'\n f'convert-memref-to-llvm,'\n f'convert-std-to-llvm')\n self.pipeline = pipeline\n\n def __call__(self, module: ir.Module):\n passmanager.PassManager.parse(self.pipeline).run(module)\n\n\n# CHECK-LABEL: TEST: testSpMM\n# CHECK: Passed 72 tests\n@run\ndef testSpMM():\n # Obtain path to runtime support library.\n support_lib = os.getenv('SUPPORT_LIB')\n assert os.path.exists(support_lib), f'{support_lib} does not exist'\n\n with ir.Context() as ctx, ir.Location.unknown():\n count = 0\n # Fixed compiler optimization strategy.\n # TODO: explore state space here too\n par = 0\n vec = 0\n vl = 1\n e = False\n opt = (f'parallelization-strategy={par} '\n f'vectorization-strategy={vec} '\n f'vl={vl} enable-simd-index32={e}')\n # Exhaustive loop over various ways to annotate a kernel with\n # a *single* sparse tensor. Even this subset already gives\n # quite a large state space!\n levels = [[st.DimLevelType.dense, st.DimLevelType.dense],\n [st.DimLevelType.dense, st.DimLevelType.compressed],\n [st.DimLevelType.compressed, st.DimLevelType.dense],\n [st.DimLevelType.compressed, st.DimLevelType.compressed]]\n orderings = [\n ir.AffineMap.get_permutation([0, 1]),\n ir.AffineMap.get_permutation([1, 0])\n ]\n bitwidths = [0, 8, 32]\n for level in levels:\n for ordering in orderings:\n for pwidth in bitwidths:\n for iwidth in bitwidths:\n attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)\n compiler = SparseCompiler(options=opt)\n build_compile_and_run_SpMM(attr, support_lib, compiler)\n count = count + 1\n print('Passed ', count, 'tests')\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.allclose",
"numpy.matmul"
]
] |
andrea-silvi/pointnet.pytorch
|
[
"6aa48c12f6b507dedb3af716c61310ac574aea91"
] |
[
"gcnn/gcnn_model.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Yue Wang\n@Contact: [email protected]\n@File: model.py\n@Time: 2018/10/13 6:35 PM\n\"\"\"\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef knn(x, k):\n inner = -2 * torch.matmul(x.transpose(2, 1), x)\n xx = torch.sum(x ** 2, dim=1, keepdim=True)\n pairwise_distance = -xx - inner - xx.transpose(2, 1)\n\n idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)\n return idx\n\ndef get_graph_feature(x, k=20, idx=None):\n batch_size = x.size(0)\n num_points = x.size(2)\n x = x.view(batch_size, -1, num_points)\n if idx is None:\n idx = knn(x, k=k) # (batch_size, num_points, k)\n device = torch.device('cuda')\n\n idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points\n\n idx = idx + idx_base\n\n idx = idx.view(-1)\n\n _, num_dims, _ = x.size()\n\n x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) #\n # batch_size * num_points * k + range(0, batch_size*num_points)\n feature = x.view(batch_size * num_points, -1)[idx, :]\n feature = feature.view(batch_size, num_points, k, num_dims)\n x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)\n\n feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()\n\n return feature\n\n\nclass DGCNN(nn.Module):\n def __init__(self, args):\n super(DGCNN, self).__init__()\n self.args = args\n self.k = args.k\n\n self.bn1 = nn.BatchNorm2d(64)\n self.bn2 = nn.BatchNorm2d(64)\n self.bn3 = nn.BatchNorm2d(128)\n self.bn4 = nn.BatchNorm2d(256)\n self.bn5 = nn.BatchNorm1d(args.size_encoder)\n\n self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),\n self.bn1,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv2 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),\n self.bn2,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv3 = nn.Sequential(nn.Conv2d(64 * 2, 128, kernel_size=1, bias=False),\n self.bn3,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv4 = nn.Sequential(nn.Conv2d(128 * 2, 256, kernel_size=1, bias=False),\n self.bn4,\n nn.LeakyReLU(negative_slope=0.2))\n self.conv5 = nn.Sequential(nn.Conv1d(512, args.size_encoder, kernel_size=1, bias=False),\n self.bn5,\n nn.LeakyReLU(negative_slope=0.2))\n\n\n def forward(self, x):\n batch_size = x.size(0)\n x = get_graph_feature(x, k=self.k)\n x = self.conv1(x)\n x1 = x.max(dim=-1, keepdim=False)[0]\n x = get_graph_feature(x1, k=self.k)\n x = self.conv2(x)\n x2 = x.max(dim=-1, keepdim=False)[0]\n x = get_graph_feature(x2, k=self.k)\n x = self.conv3(x)\n x3 = x.max(dim=-1, keepdim=False)[0]\n x = get_graph_feature(x3, k=self.k)\n x = self.conv4(x)\n x4 = x.max(dim=-1, keepdim=False)[0]\n x = torch.cat((x1, x2, x3, x4), dim=1)\n x = self.conv5(x)\n x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)\n x = torch.cat((x1, x2), 1)\n x = x.view(batch_size, self.args.size_encoder*2)\n return x\n\nclass Decoder(nn.Module):\n ''' Just a lightweight Fully Connected decoder:\n '''\n\n def __init__(self, args):\n super(Decoder, self).__init__()\n self.num_points = args.num_points\n self.fc1 = nn.Linear(args.size_encoder, 512)\n self.fc2 = nn.Linear(512, 512)\n self.fc3 = nn.Linear(512, 1024)\n self.fc4 = nn.Linear(1024, 1024)\n self.fc5 = nn.Linear(1024, self.num_points * 3)\n #self.dp = nn.Dropout(p=args.dropout)\n self.th = nn.Tanh()\n\n def forward(self, x):\n batch_size = x.size()[0]\n x = 
F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n #x = self.dp(x)\n x = self.th(self.fc5(x))\n x = x.view(batch_size, self.num_points, 3)\n return x\n\n\nclass PyramidDecoder(nn.Module):\n ''' Point pyramid decoder from PF_Net:\n '''\n\n def __init__(self, args):\n super(PyramidDecoder, self).__init__()\n self.num_points = args.num_points\n self.fc1 = nn.Linear(args.size_encoder*2, 1024)\n self.fc2 = nn.Linear(1024, 512)\n self.fc3 = nn.Linear(512, 256)\n\n self.fc1_1 = nn.Linear(1024, 256 * self.num_points)\n self.fc2_1 = nn.Linear(512, 128 * 256)\n self.fc3_1 = nn.Linear(256, 128 * 3)\n\n self.conv1_1 = torch.nn.Conv1d(self.num_points, self.num_points, 1)\n self.conv1_2 = torch.nn.Conv1d(self.num_points, 512, 1)\n self.conv1_3 = torch.nn.Conv1d(512, int((self.num_points * 3) / 256), 1)\n self.conv2_1 = torch.nn.Conv1d(256, 6, 1)\n\n\n def forward(self, x):\n x_1 = F.relu(self.fc1(x)) # 1024\n x_2 = F.relu(self.fc2(x_1)) # 512\n x_3 = F.relu(self.fc3(x_2)) # 256\n\n pc1_feat = self.fc3_1(x_3)\n pc1_xyz = pc1_feat.reshape(-1, 128, 3) # 128x3 [center1 ,coarse sampling] final!\n\n pc2_feat = F.relu(self.fc2_1(x_2))\n pc2_feat = pc2_feat.reshape(-1, 256, 128)\n pc2_xyz = self.conv2_1(pc2_feat) # 256x128 -> 6x128 [center2, fine sampling]\n\n pc3_feat = F.relu(self.fc1_1(x_1))\n pc3_feat = pc3_feat.reshape(-1, self.num_points, 256)\n pc3_feat = F.relu(self.conv1_1(pc3_feat)) # 1024x256 -> 1024x256\n pc3_feat = F.relu(self.conv1_2(pc3_feat)) # 1024x256 -> 512x256\n pc3_xyz = self.conv1_3(pc3_feat) # 512x256 -> 12x256 complete\n\n pc1_xyz_expand = torch.unsqueeze(pc1_xyz, 2) # 128x1x3\n pc2_xyz = pc2_xyz.transpose(1, 2) # 128x6\n pc2_xyz = pc2_xyz.reshape(-1, 128, 2, 3) # 128x2x3\n pc2_xyz = pc1_xyz_expand + pc2_xyz\n pc2_xyz = pc2_xyz.reshape(-1, 256, 3) # 128x2x3 -> 256x3 final!\n\n pc2_xyz_expand = torch.unsqueeze(pc2_xyz, 2) # 256x1x3\n pc3_xyz = pc3_xyz.transpose(1, 2) # 256x12\n pc3_xyz = pc3_xyz.reshape(-1, 256, int(self.num_points / 256), 3) # 256x4x3\n pc3_xyz = pc2_xyz_expand + pc3_xyz\n pc3_xyz = pc3_xyz.reshape(-1, self.num_points, 3) #1024x3 final!\n\n return pc1_xyz, pc2_xyz, pc3_xyz # center1 ,center2 ,complete\n\n\nclass DGCNN_AutoEncoder(nn.Module):\n '''\n Complete AutoEncoder Model:\n Given an input point cloud X:\n - Step 1: encode the point cloud X into a latent low-dimensional code\n - Step 2: Starting from the code geneate a representation Y as close as possible to the original input X\n\n\n '''\n\n def __init__(self, args):\n super(DGCNN_AutoEncoder, self).__init__()\n #print(\"PointNet AE Init - num_points (# generated): %d\" % num_points)\n\n # Encoder Definition\n self.encoder = DGCNN(args=args)\n\n\n # Decoder Definition\n self.decoder = PyramidDecoder(args=args) if args.type_decoder == \"pyramid\" else Decoder(args=args)\n\n def forward(self, x):\n BS, N, dim = x.size()\n #print(x.size())\n assert dim == 3, f\"Fail: expecting 3 (x-y-z) as last tensor dimension! 
Found {dim}\"\n\n # Refactoring batch for 'PointNetfeat' processing\n x = x.permute(0, 2, 1) # [BS, N, 3] => [BS, 3, N]\n\n # Encoding\n code = self.encoder(x) # [BS, 3, N] => [BS, size_encoder]\n\n # Decoding\n decoded = self.decoder(code)\n\n return decoded #either a pointcloud [BS, num_points, 3] or a tuple of 3 pointclouds 3 x [BS, 3, num_points]\n\n return decoded\n\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--k\", type=int, default=0.999, help=\"decay rate for second moment\")\n parser.add_argument(\"--size_encoder\", type=int, default=7, help=\"How long to wait after last time val loss improved.\")\n parser.add_argument(\"--dropout\", type=int, default=0, help=\"How long to wait after last time val loss improved.\")\n opt = parser.parse_args()\n model = DGCNN(opt)\n model.forward(torch.rand((32, 3, 1024)))\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.functional.adaptive_max_pool1d",
"torch.nn.Conv2d",
"torch.sum",
"torch.arange",
"torch.unsqueeze",
"torch.nn.Tanh",
"torch.nn.functional.adaptive_avg_pool1d",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.rand",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.device"
]
] |
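The knn() helper above avoids an explicit point-pair loop by expanding -||x_i - x_j||^2 into -x_i.x_i + 2*x_i.x_j - x_j.x_j with a single matmul, then calling topk on the negated squared distances. get_graph_feature assumes a CUDA device, but the identity itself needs only CPU tensors; a quick check against torch.cdist:

# Verifying the squared-distance expansion used by knn() against torch.cdist.
import torch

x = torch.randn(2, 3, 16)                       # (batch, dims, num_points)
inner = -2 * torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x ** 2, dim=1, keepdim=True)
neg_sq_dist = -xx - inner - xx.transpose(2, 1)  # (batch, num_points, num_points)
idx = neg_sq_dist.topk(k=4, dim=-1)[1]          # largest of negated => smallest distance

ref = torch.cdist(x.transpose(2, 1), x.transpose(2, 1)).topk(k=4, dim=-1, largest=False)[1]
print(torch.equal(idx, ref))                    # True (can differ only on exact ties)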
atypon/specter
|
[
"bc1ee723167cf1dbf599603e09539c1823f26c17"
] |
[
"scripts/pytorch_lightning_training_script/train.py"
] |
[
"# basic python packages\nimport json\nimport pickle\nfrom typing import Dict\nimport argparse\nfrom argparse import Namespace\nimport glob\nimport random\nimport numpy as np\nimport itertools\nimport logging\nlogger = logging.getLogger(__name__)\n\n# pytorch packages\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, IterableDataset\n\n# pytorch lightning packages\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\n# huggingface transformers packages\nfrom transformers import AdamW\nfrom transformers import AutoTokenizer, AutoModel\nfrom transformers.optimization import (\n Adafactor,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\n\n# allennlp dataloading packages\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.tokenizers import Tokenizer\nfrom allennlp.data.token_indexers import TokenIndexer\nfrom allennlp.data.tokenizers.word_splitter import WordSplitter\nfrom allennlp.data.tokenizers.token import Token\n\n# Globe constants\ntraining_size = 684100\n# validation_size = 145375\n\n# log_every_n_steps how frequently pytorch lightning logs.\n# By default, Lightning logs every 50 rows, or 50 training steps.\nlog_every_n_steps = 1\n\narg_to_scheduler = {\n \"linear\": get_linear_schedule_with_warmup,\n \"cosine\": get_cosine_schedule_with_warmup,\n \"cosine_w_restarts\": get_cosine_with_hard_restarts_schedule_with_warmup,\n \"polynomial\": get_polynomial_decay_schedule_with_warmup,\n # '': get_constant_schedule, # not supported for now\n # '': get_constant_schedule_with_warmup, # not supported for now\n}\narg_to_scheduler_choices = sorted(arg_to_scheduler.keys())\narg_to_scheduler_metavar = \"{\" + \", \".join(arg_to_scheduler_choices) + \"}\"\n\n\nclass DataReaderFromPickled(DatasetReader):\n \"\"\"\n This is copied from https://github.com/allenai/specter/blob/673346f9f76bcf422b38e0d1b448ef4414bcd4df/specter/data.py#L61:L109 without any change\n \"\"\"\n def __init__(self,\n lazy: bool = False,\n word_splitter: WordSplitter = None,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n max_sequence_length: int = 256,\n concat_title_abstract: bool = None\n ) -> None:\n \"\"\"\n Dataset reader that uses pickled preprocessed instances\n Consumes the output resulting from data_utils/create_training_files.py\n\n the additional arguments are not used here and are for compatibility with\n the other data reader at prediction time\n \"\"\"\n self.max_sequence_length = max_sequence_length\n self.token_indexers = token_indexers\n self._concat_title_abstract = concat_title_abstract\n super().__init__(lazy)\n\n def _read(self, file_path: str):\n \"\"\"\n Args:\n file_path: path to the pickled instances\n \"\"\"\n with open(file_path, 'rb') as f_in:\n unpickler = pickle.Unpickler(f_in)\n while True:\n try:\n instance = unpickler.load()\n # compatibility with old models:\n # for field in instance.fields:\n # if hasattr(instance.fields[field], '_token_indexers') and 'bert' in instance.fields[field]._token_indexers:\n # if not hasattr(instance.fields['source_title']._token_indexers['bert'], '_truncate_long_sequences'):\n # instance.fields[field]._token_indexers['bert']._truncate_long_sequences = True\n # 
instance.fields[field]._token_indexers['bert']._token_min_padding_length = 0\n if self.max_sequence_length:\n for paper_type in ['source', 'pos', 'neg']:\n if self._concat_title_abstract:\n tokens = []\n title_field = instance.fields.get(f'{paper_type}_title')\n abst_field = instance.fields.get(f'{paper_type}_abstract')\n if title_field:\n tokens.extend(title_field.tokens)\n if tokens:\n tokens.extend([Token('[SEP]')])\n if abst_field:\n tokens.extend(abst_field.tokens)\n if title_field:\n title_field.tokens = tokens\n instance.fields[f'{paper_type}_title'] = title_field\n elif abst_field:\n abst_field.tokens = tokens\n instance.fields[f'{paper_type}_title'] = abst_field\n else:\n yield None\n # title_tokens = get_text_tokens(query_title_tokens, query_abstract_tokens, abstract_delimiter)\n # pos_title_tokens = get_text_tokens(pos_title_tokens, pos_abstract_tokens, abstract_delimiter)\n # neg_title_tokens = get_text_tokens(neg_title_tokens, neg_abstract_tokens, abstract_delimiter)\n # query_abstract_tokens = pos_abstract_tokens = neg_abstract_tokens = []\n for field_type in ['title', 'abstract', 'authors', 'author_positions']:\n field = paper_type + '_' + field_type\n if instance.fields.get(field):\n instance.fields[field].tokens = instance.fields[field].tokens[\n :self.max_sequence_length]\n if field_type == 'abstract' and self._concat_title_abstract:\n instance.fields.pop(field, None)\n yield instance\n except EOFError:\n break\n\n\nclass IterableDataSetMultiWorker(IterableDataset):\n def __init__(self, file_path, tokenizer, size, block_size=100):\n self.datareaderfp = DataReaderFromPickled(max_sequence_length=512)\n self.data_instances = self.datareaderfp._read(file_path)\n self.tokenizer = tokenizer\n self.size = size\n self.block_size = block_size\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None:\n iter_end = self.size\n for data_instance in itertools.islice(self.data_instances, iter_end):\n data_input = self.ai2_to_transformers(data_instance, self.tokenizer)\n yield data_input\n\n else:\n # when num_worker is greater than 1. 
we implement multiple process data loading.\n iter_end = self.size\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n i = 0\n for data_instance in itertools.islice(self.data_instances, iter_end):\n if int(i / self.block_size) % num_workers != worker_id:\n i = i + 1\n pass\n else:\n i = i + 1\n data_input = self.ai2_to_transformers(data_instance, self.tokenizer)\n yield data_input\n\n def ai2_to_transformers(self, data_instance, tokenizer):\n \"\"\"\n Args:\n data_instance: ai2 data instance\n tokenizer: huggingface transformers tokenizer\n \"\"\"\n source_tokens = data_instance[\"source_title\"].tokens\n source_title = tokenizer(' '.join([str(token) for token in source_tokens]),\n truncation=True, padding=\"max_length\", return_tensors=\"pt\", return_token_type_ids=True,\n max_length=512)\n\n source_input = {'input_ids': source_title['input_ids'][0],\n 'token_type_ids': source_title['token_type_ids'][0],\n 'attention_mask': source_title['attention_mask'][0]}\n\n pos_tokens = data_instance[\"pos_title\"].tokens\n pos_title = tokenizer(' '.join([str(token) for token in pos_tokens]),\n truncation=True, padding=\"max_length\", return_token_type_ids=True, return_tensors=\"pt\", max_length=512)\n\n pos_input = {'input_ids': pos_title['input_ids'][0],\n 'token_type_ids': pos_title['token_type_ids'][0],\n 'attention_mask': pos_title['attention_mask'][0]}\n\n neg_tokens = data_instance[\"neg_title\"].tokens\n neg_title = tokenizer(' '.join([str(token) for token in neg_tokens]),\n truncation=True, padding=\"max_length\", return_token_type_ids=True, return_tensors=\"pt\", max_length=512)\n\n neg_input = {'input_ids': neg_title['input_ids'][0],\n 'token_type_ids': neg_title['token_type_ids'][0],\n 'attention_mask': neg_title['attention_mask'][0]}\n\n return source_input, pos_input, neg_input\n\n\nclass IterableDataSetMultiWorkerTestStep(IterableDataset):\n def __init__(self, file_path, tokenizer, size, block_size=100):\n self.datareaderfp = DataReaderFromPickled(max_sequence_length=512)\n self.data_instances = self.datareaderfp._read(file_path)\n self.tokenizer = tokenizer\n self.size = size\n self.block_size = block_size\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None:\n iter_end = self.size\n for data_instance in itertools.islice(self.data_instances, iter_end):\n data_input = self.ai2_to_transformers(data_instance, self.tokenizer)\n yield data_input\n\n else:\n # when num_worker is greater than 1. 
we implement multiple process data loading.\n iter_end = self.size\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n i = 0\n for data_instance in itertools.islice(self.data_instances, iter_end):\n if int(i / self.block_size) % num_workers != worker_id:\n i = i + 1\n pass\n else:\n i = i + 1\n data_input = self.ai2_to_transformers(data_instance, self.tokenizer)\n yield data_input\n\n def ai2_to_transformers(self, data_instance, tokenizer):\n \"\"\"\n Args:\n data_instance: ai2 data instance\n tokenizer: huggingface transformers tokenizer\n \"\"\"\n source_tokens = data_instance[\"source_title\"].tokens\n source_title = tokenizer(' '.join([str(token) for token in source_tokens]),\n truncation=True, padding=\"max_length\", return_tensors=\"pt\",\n max_length=512)\n source_input = {'input_ids': source_title['input_ids'][0],\n 'token_type_ids': source_title['token_type_ids'][0],\n 'attention_mask': source_title['attention_mask'][0]}\n\n source_paper_id = data_instance['source_paper_id'].metadata\n\n return source_input, source_paper_id\n\n\nclass TripletLoss(nn.Module):\n \"\"\"\n Triplet loss: copied from https://github.com/allenai/specter/blob/673346f9f76bcf422b38e0d1b448ef4414bcd4df/specter/model.py#L159 without any change\n \"\"\"\n def __init__(self, margin=1.0, distance='l2-norm', reduction='mean'):\n \"\"\"\n Args:\n margin: margin (float, optional): Default: `1`.\n distance: can be `l2-norm` or `cosine`, or `dot`\n reduction (string, optional): Specifies the reduction to apply to the output:\n 'none' | 'mean' | 'sum'. 'none': no reduction will be applied,\n 'mean': the sum of the output will be divided by the number of\n elements in the output, 'sum': the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. 
Default: 'mean'\n \"\"\"\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.distance = distance\n self.reduction = reduction\n\n def forward(self, query, positive, negative):\n if self.distance == 'l2-norm':\n distance_positive = F.pairwise_distance(query, positive)\n distance_negative = F.pairwise_distance(query, negative)\n losses = F.relu(distance_positive - distance_negative + self.margin)\n elif self.distance == 'cosine': # independent of length\n distance_positive = F.cosine_similarity(query, positive)\n distance_negative = F.cosine_similarity(query, negative)\n losses = F.relu(-distance_positive + distance_negative + self.margin)\n elif self.distance == 'dot': # takes into account the length of vectors\n shapes = query.shape\n # batch dot product\n distance_positive = torch.bmm(\n query.view(shapes[0], 1, shapes[1]),\n positive.view(shapes[0], shapes[1], 1)\n ).reshape(shapes[0], )\n distance_negative = torch.bmm(\n query.view(shapes[0], 1, shapes[1]),\n negative.view(shapes[0], shapes[1], 1)\n ).reshape(shapes[0], )\n losses = F.relu(-distance_positive + distance_negative + self.margin)\n else:\n raise TypeError(f\"Unrecognized option for `distance`:{self.distance}\")\n\n if self.reduction == 'mean':\n return losses.mean()\n elif self.reduction == 'sum':\n return losses.sum()\n elif self.reduction == 'none':\n return losses\n else:\n raise TypeError(f\"Unrecognized option for `reduction`:{self.reduction}\")\n\n\nclass Specter(pl.LightningModule):\n def __init__(self, init_args):\n super().__init__()\n if isinstance(init_args, dict):\n # for loading the checkpoint, pl passes a dict (hparams are saved as dict)\n init_args = Namespace(**init_args)\n checkpoint_path = init_args.checkpoint_path\n logger.info(f'loading model from checkpoint: {checkpoint_path}')\n\n self.hparams = init_args\n self.model = AutoModel.from_pretrained(\"sentence-transformers/all-mpnet-base-v2\")\n self.tokenizer = AutoTokenizer.from_pretrained(\"sentence-transformers/all-mpnet-base-v2\")\n self.tokenizer.model_max_length = self.model.config.max_position_embeddings\n self.hparams.seqlen = self.model.config.max_position_embeddings\n self.triple_loss = TripletLoss()\n # number of training instances\n self.training_size = None\n # number of testing instances\n self.validation_size = None\n # number of test instances\n self.test_size = None\n # This is a dictionary to save the embeddings for source papers in test step.\n self.embedding_output = {}\n\n def forward(self, input_ids, token_type_ids, attention_mask):\n # in lightning, forward defines the prediction/inference actions\n source_embedding = self.model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n return source_embedding[1]\n\n def _get_loader(self, split):\n if split == 'train':\n fname = self.hparams.train_file\n size = self.training_size\n elif split == 'dev':\n fname = self.hparams.dev_file\n size = self.validation_size\n elif split == 'test':\n fname = self.hparams.test_file\n size = self.test_size\n else:\n assert False\n\n if split == 'test':\n dataset = IterableDataSetMultiWorkerTestStep(file_path=fname, tokenizer=self.tokenizer, size=size)\n else:\n dataset = IterableDataSetMultiWorker(file_path=fname, tokenizer=self.tokenizer, size=size)\n\n # pin_memory enables faster data transfer to CUDA-enabled GPU.\n loader = DataLoader(dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers,\n shuffle=False, pin_memory=False)\n return loader\n\n def setup(self, mode):\n 
self.train_loader = self._get_loader(\"train\")\n\n def train_dataloader(self):\n return self.train_loader\n\n def val_dataloader(self):\n self.val_dataloader_obj = self._get_loader('dev')\n return self.val_dataloader_obj\n\n def test_dataloader(self):\n return self._get_loader('test')\n\n @property\n def total_steps(self) -> int:\n \"\"\"The number of total training steps that will be run. Used for lr scheduler purposes.\"\"\"\n num_devices = max(1, self.hparams.total_gpus) # TODO: consider num_tpu_cores\n effective_batch_size = self.hparams.batch_size * self.hparams.grad_accum * num_devices\n # dataset_size = len(self.train_loader.dataset)\n \"\"\"The size of the training data need to be coded with more accurate number\"\"\"\n dataset_size = training_size\n return (dataset_size / effective_batch_size) * self.hparams.num_epochs\n\n def get_lr_scheduler(self):\n get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]\n scheduler = get_schedule_func(\n self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps\n )\n scheduler = {\"scheduler\": scheduler, \"interval\": \"step\", \"frequency\": 1}\n return scheduler\n\n def configure_optimizers(self):\n \"\"\"Prepare optimizer and schedule (linear warmup and decay)\"\"\"\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.hparams.adafactor:\n optimizer = Adafactor(\n optimizer_grouped_parameters, lr=self.hparams.lr, scale_parameter=False, relative_step=False\n )\n\n else:\n optimizer = AdamW(\n optimizer_grouped_parameters, lr=self.hparams.lr, eps=self.hparams.adam_epsilon\n )\n self.opt = optimizer\n\n scheduler = self.get_lr_scheduler()\n\n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n source_embedding = self.model(**batch[0])[1]\n pos_embedding = self.model(**batch[1])[1]\n neg_embedding = self.model(**batch[2])[1]\n\n loss = self.triple_loss(source_embedding, pos_embedding, neg_embedding)\n\n lr_scheduler = self.trainer.lr_schedulers[0][\"scheduler\"]\n\n self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=True, logger=True)\n self.log('rate', lr_scheduler.get_last_lr()[-1], on_step=True, on_epoch=False, prog_bar=True, logger=True)\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n source_embedding = self.model(**batch[0])[1]\n pos_embedding = self.model(**batch[1])[1]\n neg_embedding = self.model(**batch[2])[1]\n\n loss = self.triple_loss(source_embedding, pos_embedding, neg_embedding)\n self.log('val_loss', loss, on_step=True, on_epoch=False, prog_bar=True)\n return {'val_loss': loss}\n\n def _eval_end(self, outputs) -> tuple:\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n if self.trainer.use_ddp:\n torch.distributed.all_reduce(avg_loss, op=torch.distributed.ReduceOp.SUM)\n avg_loss /= self.trainer.world_size\n results = {\"avg_val_loss\": avg_loss}\n for k, v in results.items():\n if isinstance(v, torch.Tensor):\n results[k] = v.detach().cpu().item()\n return results\n\n def validation_epoch_end(self, outputs: list) -> dict:\n ret = self._eval_end(outputs)\n\n self.log('avg_val_loss', ret[\"avg_val_loss\"], on_epoch=True, prog_bar=True)\n\n def test_epoch_end(self, 
outputs: list):\n # convert the dictionary of {id1:embedding1, id2:embedding2, ...} to a\n # list of dictionaries [{'id':'id1', 'embedding': 'embedding1'},{'id':'id2', 'embedding': 'embedding2'}, ...]\n embedding_output_list = [{'id': key, 'embedding': value.detach().cpu().numpy().tolist()}\n for key, value in self.embedding_output.items()]\n\n with open(self.hparams.save_dir+'/embedding_result.jsonl', 'w') as fp:\n fp.write('\\n'.join(json.dumps(i) for i in embedding_output_list))\n\n def test_step(self, batch, batch_nb):\n source_embedding = self.model(**batch[0])[1]\n source_paper_id = batch[1]\n\n batch_embedding_output = dict(zip(source_paper_id, source_embedding))\n\n # .update() will automatically remove duplicates.\n self.embedding_output.update(batch_embedding_output)\n # return self.validation_step(batch, batch_nb)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--checkpoint_path', default=None, help='path to the model (if not setting checkpoint)')\n parser.add_argument('--train_file')\n parser.add_argument('--dev_file')\n parser.add_argument('--test_file')\n parser.add_argument('--input_dir', default=None, help='optionally provide a directory of the data and train/test/dev files will be automatically detected')\n parser.add_argument('--batch_size', default=1, type=int)\n parser.add_argument('--grad_accum', default=1, type=int)\n parser.add_argument('--gpus', default='1')\n parser.add_argument('--seed', default=1918, type=int)\n parser.add_argument('--fp16', default=False, action='store_true')\n parser.add_argument('--test_only', default=False, action='store_true')\n parser.add_argument('--test_checkpoint', default=None)\n parser.add_argument('--limit_test_batches', default=1.0, type=float)\n parser.add_argument('--limit_val_batches', default=1.0, type=float)\n parser.add_argument('--val_check_interval', default=1.0, type=float)\n parser.add_argument('--num_epochs', default=1, type=int)\n parser.add_argument(\"--lr\", type=float, default=2e-5)\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--num_workers\", default=4, type=int, help=\"kwarg passed to DataLoader\")\n parser.add_argument(\"--adafactor\", action=\"store_true\")\n parser.add_argument('--save_dir', required=True)\n\n parser.add_argument('--num_samples', default=None, type=int)\n parser.add_argument(\"--lr_scheduler\",\n default=\"linear\",\n choices=arg_to_scheduler_choices,\n metavar=arg_to_scheduler_metavar,\n type=str,\n help=\"Learning rate scheduler\")\n args = parser.parse_args()\n\n if args.input_dir is not None:\n files = glob.glob(args.input_dir + '/*')\n for f in files:\n fname = f.split('/')[-1]\n if 'train' in fname:\n args.train_file = f\n elif 'dev' in fname or 'val' in fname:\n args.dev_file = f\n elif 'test' in fname:\n args.test_file = f\n return args\n\n\ndef get_train_params(args):\n train_params = {}\n train_params[\"precision\"] = 16 if args.fp16 else 32\n if (isinstance(args.gpus, int) and args.gpus > 1) or (isinstance(args.gpus, list ) and len(args.gpus) > 1):\n train_params[\"distributed_backend\"] = \"ddp\"\n else:\n train_params[\"distributed_backend\"] = None\n train_params[\"accumulate_grad_batches\"] = args.grad_accum\n train_params['track_grad_norm'] = -1\n 
train_params['limit_val_batches'] = args.limit_val_batches\n    train_params['val_check_interval'] = args.val_check_interval\n    train_params['gpus'] = args.gpus\n    train_params['max_epochs'] = args.num_epochs\n    train_params['log_every_n_steps'] = 50  # Lightning's default logging interval; not exposed on the CLI here\n    return train_params\n\n\ndef main():\n    args = parse_args()\n    random.seed(args.seed)\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n    if args.num_workers == 0:\n        print(\"num_workers cannot be less than 1\")\n        return\n\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed_all(args.seed)\n    if ',' in args.gpus:\n        args.gpus = list(map(int, args.gpus.split(',')))\n        args.total_gpus = len(args.gpus)\n    else:\n        args.gpus = int(args.gpus)\n        args.total_gpus = args.gpus\n\n    if args.test_only:\n        print('loading model...')\n        model = Specter.load_from_checkpoint(args.test_checkpoint)\n        trainer = pl.Trainer(gpus=args.gpus, limit_val_batches=args.limit_val_batches)\n        trainer.test(model)\n\n    else:\n\n        model = Specter(args)\n\n        # default logger used by trainer\n        logger = TensorBoardLogger(\n            save_dir=args.save_dir,\n            version=0,\n            name='pl-logs'\n        )\n\n        # second part of the path shouldn't be f-string\n        filepath = f'{args.save_dir}/version_{logger.version}/checkpoints/' + 'ep-{epoch}_avg_val_loss-{avg_val_loss:.3f}'\n        checkpoint_callback = ModelCheckpoint(\n            filepath=filepath,\n            save_top_k=1,\n            verbose=True,\n            monitor='avg_val_loss', # monitors metrics logged by self.log.\n            mode='min',\n            prefix=''\n        )\n\n        extra_train_params = get_train_params(args)\n\n        trainer = pl.Trainer(logger=logger,\n                             checkpoint_callback=checkpoint_callback,\n                             **extra_train_params)\n\n        trainer.fit(model)\n\n\nif __name__ == '__main__':\n    main()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.utils.data.get_worker_info",
"torch.nn.functional.pairwise_distance",
"torch.nn.functional.relu",
"torch.nn.functional.cosine_similarity",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.stack",
"torch.distributed.all_reduce"
]
] |
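To illustrate the cosine branch of the TripletLoss defined in the record above, a minimal standalone sketch: the function below re-derives the same margin formula with torch.nn.functional, and the random tensors are placeholder embeddings rather than SPECTER outputs.

import torch
import torch.nn.functional as F

def cosine_triplet_loss(query, positive, negative, margin=1.0):
    # zero loss once sim(query, positive) beats sim(query, negative) by `margin`
    d_pos = F.cosine_similarity(query, positive)
    d_neg = F.cosine_similarity(query, negative)
    return F.relu(-d_pos + d_neg + margin).mean()

q, p, n = torch.randn(4, 8), torch.randn(4, 8), torch.randn(4, 8)
print(cosine_triplet_loss(q, p, n))   # random triplets: loss roughly equal to the margin
print(cosine_triplet_loss(q, q, -q))  # perfectly separated triplets: loss 0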
kanderso-nrel/pyopencl
|
[
"0ee2164ec65de9a4aa69e0c01e70aa66c3d989cb"
] |
[
"examples/demo_mandelbrot.py"
] |
[
"# I found this example for PyCuda here:\n# http://wiki.tiker.net/PyCuda/Examples/Mandelbrot\n#\n# An improved sequential/pure Python code was contributed\n# by CRVSADER//KY <[email protected]>.\n#\n# I adapted it for PyOpenCL. Hopefully it is useful to someone.\n# July 2010, [email protected]\n#\n# Original readme below these lines.\n\n# Mandelbrot calculate using GPU, Serial numpy and faster numpy\n# Use to show the speed difference between CPU and GPU calculations\n# [email protected] March 2010\n\n# Based on vegaseat's TKinter/numpy example code from 2006\n# http://www.daniweb.com/code/snippet216851.html#\n# with minor changes to move to numpy from the obsolete Numeric\n\nimport time\n\nimport numpy as np\n\nimport pyopencl as cl\n\nfrom PIL import Image\n\n# You can choose a calculation routine below (calc_fractal), uncomment\n# one of the three lines to test the three variations\n# Speed notes are listed in the same place\n\n# set width and height of window, more pixels take longer to calculate\nw = 2048\nh = 2048\n\n\ndef calc_fractal_opencl(q, maxiter):\n ctx = cl.create_some_context()\n queue = cl.CommandQueue(ctx)\n\n output = np.empty(q.shape, dtype=np.uint16)\n\n mf = cl.mem_flags\n q_opencl = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)\n output_opencl = cl.Buffer(ctx, mf.WRITE_ONLY, output.nbytes)\n\n prg = cl.Program(\n ctx,\n \"\"\"\n #pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable\n __kernel void mandelbrot(__global float2 *q,\n __global ushort *output, ushort const maxiter)\n {\n int gid = get_global_id(0);\n float nreal, real = 0;\n float imag = 0;\n\n output[gid] = 0;\n\n for(int curiter = 0; curiter < maxiter; curiter++) {\n nreal = real*real - imag*imag + q[gid].x;\n imag = 2* real*imag + q[gid].y;\n real = nreal;\n\n if (real*real + imag*imag > 4.0f)\n output[gid] = curiter;\n }\n }\n \"\"\",\n ).build()\n\n prg.mandelbrot(\n queue, output.shape, None, q_opencl, output_opencl, np.uint16(maxiter)\n )\n\n cl.enqueue_copy(queue, output, output_opencl).wait()\n\n return output\n\n\ndef calc_fractal_serial(q, maxiter):\n # calculate z using pure python on a numpy array\n # note that, unlike the other two implementations,\n # the number of iterations per point is NOT constant\n z = np.zeros(q.shape, complex)\n output = np.resize(\n np.array(\n 0,\n ),\n q.shape,\n )\n for i in range(len(q)):\n for iter in range(maxiter):\n z[i] = z[i] * z[i] + q[i]\n if abs(z[i]) > 2.0:\n output[i] = iter\n break\n return output\n\n\ndef calc_fractal_numpy(q, maxiter):\n # calculate z using numpy, this is the original\n # routine from vegaseat's URL\n output = np.resize(\n np.array(\n 0,\n ),\n q.shape,\n )\n z = np.zeros(q.shape, np.complex64)\n\n for it in range(maxiter):\n z = z * z + q\n done = np.greater(abs(z), 2.0)\n q = np.where(done, 0 + 0j, q)\n z = np.where(done, 0 + 0j, z)\n output = np.where(done, it, output)\n return output\n\n\n# choose your calculation routine here by uncommenting one of the options\ncalc_fractal = calc_fractal_opencl\n# calc_fractal = calc_fractal_serial\n# calc_fractal = calc_fractal_numpy\n\n\nclass Mandelbrot:\n def draw(self, x1, x2, y1, y2, maxiter=30):\n # draw the Mandelbrot set, from numpy example\n xx = np.arange(x1, x2, (x2 - x1) / w)\n yy = np.arange(y2, y1, (y1 - y2) / h) * 1j\n q = np.ravel(xx + yy[:, np.newaxis]).astype(np.complex64)\n\n start_main = time.time()\n output = calc_fractal(q, maxiter)\n end_main = time.time()\n\n secs = end_main - start_main\n print(\"Main took\", secs)\n\n self.mandel = 
(output.reshape((h, w)) / float(output.max()) * 255.0).astype(\n            np.uint8\n        )\n\n    def create_image(self):\n        \"\"\"Create the image from the array computed by draw().\"\"\"\n        # you can experiment with these x and y ranges\n        self.draw(-2.13, 0.77, -1.3, 1.3)\n        self.im = Image.fromarray(self.mandel)\n        self.im.putpalette([i for rgb in ((j, 0, 0) for j in range(255))\n                            for i in rgb])\n\n    def create_label(self):\n        # put the image on a label widget\n        self.image = ImageTk.PhotoImage(self.im)\n        self.label = tk.Label(self.root, image=self.image)\n        self.label.pack()\n\n    def run_tk(self):\n        self.root = tk.Tk()\n        self.root.title(\"Mandelbrot Set\")\n        self.create_image()\n        self.create_label()\n        # start event loop\n        self.root.mainloop()\n\n\nif __name__ == \"__main__\":\n    test = Mandelbrot()\n    try:\n        import tkinter as tk\n    except ModuleNotFoundError:\n        test.create_image()\n    else:\n        from PIL import ImageTk\n        try:\n            test.run_tk()\n        except tk.TclError:\n            test.create_image()\n"
] |
[
[
"numpy.arange",
"numpy.uint16",
"numpy.ravel",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
]
] |
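The escape-time update in calc_fractal_numpy above is easiest to inspect on a tiny grid; a minimal sketch follows, where the 8x8 grid and 30 iterations are arbitrary choices rather than values from the demo.

import numpy as np

# complex grid of starting points c, flattened like the demo's q array
q = (np.linspace(-2.0, 0.5, 8)[None, :]
     + 1j * np.linspace(-1.0, 1.0, 8)[:, None]).astype(np.complex64).ravel()
z = np.zeros_like(q)
output = np.zeros(q.shape, dtype=np.int32)

for it in range(30):
    z = z * z + q
    done = np.abs(z) > 2.0
    q = np.where(done, 0 + 0j, q)        # freeze escaped points
    z = np.where(done, 0 + 0j, z)
    output = np.where(done, it, output)  # record the escape iteration

print(output.reshape(8, 8))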
marblet/gat-pytorch
|
[
"ff9cfad34460ab505458477ac43cd5f03fc78a46"
] |
[
"train.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nfrom copy import deepcopy\nfrom numpy import mean, std\nfrom tqdm import tqdm\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass EarlyStopping:\n def __init__(self, patience, verbose, use_loss, use_acc, save_model):\n assert use_loss or use_acc, 'use loss or (and) acc'\n self.patience = patience\n self.use_loss = use_loss\n self.use_acc = use_acc\n self.save_model = save_model\n self.verbose = verbose\n self.counter = 0\n self.best_val_loss = float('inf')\n self.best_val_acc = 0\n self.state_dict = None\n\n def check(self, evals, model, epoch):\n if self.use_loss and self.use_acc:\n # For GAT, based on https://github.com/PetarV-/GAT/blob/master/execute_cora.py\n if evals['val_loss'] <= self.best_val_loss or evals['val_acc'] >= self.best_val_acc:\n if evals['val_loss'] <= self.best_val_loss and evals['val_acc'] >= self.best_val_acc:\n if self.save_model:\n self.state_dict = deepcopy(model.state_dict())\n self.best_val_loss = min(self.best_val_loss, evals['val_loss'])\n self.best_val_acc = max(self.best_val_acc, evals['val_acc'])\n self.counter = 0\n else:\n self.counter += 1\n elif self.use_loss:\n if evals['val_loss'] < self.best_val_loss:\n self.best_val_loss = evals['val_loss']\n self.counter = 0\n if self.save_model:\n self.state_dict = deepcopy(model.state_dict())\n else:\n self.counter += 1\n elif self.use_acc:\n if evals['val_acc'] > self.best_val_acc:\n self.best_val_acc = evals['val_acc']\n self.counter = 0\n if self.save_model:\n self.state_dict = deepcopy(model.state_dict())\n else:\n self.counter += 1\n stop = False\n if self.counter >= self.patience:\n stop = True\n if self.verbose:\n print(\"Stop training, epoch:\", epoch)\n if self.save_model:\n model.load_state_dict(self.state_dict)\n return stop\n\n\ndef train(model, optimizer, data):\n model.train()\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output[data.train_mask], data.labels[data.train_mask])\n loss.backward()\n optimizer.step()\n\n\ndef evaluate(model, data):\n model.eval()\n\n with torch.no_grad():\n output = model(data)\n\n outputs = {}\n for key in ['train', 'val', 'test']:\n if key == 'train':\n mask = data.train_mask\n elif key == 'val':\n mask = data.val_mask\n else:\n mask = data.test_mask\n loss = F.nll_loss(output[mask], data.labels[mask]).item()\n pred = output[mask].max(dim=1)[1]\n acc = pred.eq(data.labels[mask]).sum().item() / mask.sum().item()\n\n outputs['{}_loss'.format(key)] = loss\n outputs['{}_acc'.format(key)] = acc\n\n return outputs\n\n\ndef run(data, model, lr, weight_decay, epochs=100000, niter=100, early_stopping=True, patience=100,\n use_loss=True, use_acc=True, save_model=True, verbose=False):\n # for GPU\n data.to(device)\n\n val_acc_list = []\n test_acc_list = []\n\n for _ in tqdm(range(niter)):\n optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n model.to(device).reset_parameters()\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n\n # for early stopping\n if early_stopping:\n stop_checker = EarlyStopping(patience, verbose, use_loss, use_acc, save_model)\n\n for epoch in range(1, epochs + 1):\n train(model, optimizer, data)\n evals = evaluate(model, data)\n\n if verbose:\n print('epoch: {: 4d}'.format(epoch),\n 'train loss: {:.5f}'.format(evals['train_loss']),\n 'train acc: {:.5f}'.format(evals['train_acc']),\n 'val loss: {:.5f}'.format(evals['val_loss']),\n 'val acc: {:.5f}'.format(evals['val_acc']))\n\n if early_stopping:\n 
if stop_checker.check(evals, model, epoch):\n break\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n evals = evaluate(model, data)\n if verbose:\n for met, val in evals.items():\n print(met, val)\n\n val_acc_list.append(evals['val_acc'])\n test_acc_list.append(evals['test_acc'])\n\n print(\"mean\", mean(test_acc_list))\n print(\"std\", std(test_acc_list))\n return {\n 'val_acc': mean(val_acc_list),\n 'test_acc': mean(test_acc_list),\n 'test_acc_std': std(test_acc_list)\n }\n"
] |
[
[
"torch.cuda.synchronize",
"torch.nn.functional.nll_loss",
"numpy.std",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available"
]
] |
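The masked loss/accuracy computation inside evaluate() above, shown on synthetic log-probabilities; the 10-node graph, 3 classes, and mask are illustrative stand-ins for data.train_mask and the model output.

import torch
import torch.nn.functional as F

output = torch.log_softmax(torch.randn(10, 3), dim=1)  # stand-in for model(data)
labels = torch.randint(0, 3, (10,))
mask = torch.zeros(10, dtype=torch.bool)
mask[:5] = True                                        # stand-in for data.train_mask

loss = F.nll_loss(output[mask], labels[mask]).item()
pred = output[mask].max(dim=1)[1]
acc = pred.eq(labels[mask]).sum().item() / mask.sum().item()
print(loss, acc)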
otaviomguerra/airflow-presentation
|
[
"c83f37f9abe6cdf8c42d8050d393e18f80c91fee"
] |
[
"dags/demo-dag.py"
] |
[
"from airflow import DAG\r\nfrom airflow.providers.http.sensors.http import HttpSensor\r\n\r\nfrom datetime import datetime, timedelta\r\nfrom airflow.operators.python import PythonOperator\r\nfrom airflow.providers.telegram.operators.telegram import TelegramOperator\r\n\r\nimport requests\r\nimport pandas as pd\r\nfrom sqlalchemy import create_engine\r\nfrom airflow.models import Variable\r\n\r\ndefault_args = {\r\n \"owner\": \"airflow\",\r\n \"start_date\": datetime(2021, 1, 1),\r\n \"depends_on_past\": False,\r\n \"email_on_failure\": False,\r\n \"email_on_retry\": False,\r\n \"email\": \"[email protected]\",\r\n \"retries\": 1,\r\n \"retry_delay\": timedelta(minutes=5)\r\n }\r\n\r\n\r\ndef save_api_data(df):\r\n engine = create_engine(Variable.get('DATABASE_CONN'), echo=False)\r\n df.to_sql(\r\n name='Forex',\r\n con=engine,\r\n schema='public',\r\n if_exists='append',\r\n index=False,\r\n method='multi'\r\n )\r\n print('Dados salvos com sucesso!')\r\n\r\n\r\ndef get_api_data():\r\n r = requests.get(Variable.get('API_ENDPOINT'))\r\n json_response = r.json()\r\n rates_dict, base, date = json_response['rates'], json_response['base'], json_response['date']\r\n rates_dict.update({'base': base, 'date': date})\r\n rates_df = pd.DataFrame([rates_dict])\r\n print(rates_df.head())\r\n \r\n save_api_data(rates_df)\r\n\r\n\r\nwith DAG(dag_id=\"demo_dag\", schedule_interval=\"@daily\", default_args=default_args, catchup=False) as dag:\r\n \r\n is_forex_rates_available = HttpSensor(\r\n task_id=\"is_forex_rates_available\",\r\n method=\"GET\",\r\n http_conn_id=\"forex_api\",\r\n endpoint=\"/marclamberti/f45f872dea4dfd3eaa015a4a1af4b39b\",\r\n response_check=lambda response: \"rates\" in response.text,\r\n poke_interval=5,\r\n timeout=20\r\n )\r\n \r\n get_api_data_task = PythonOperator(\r\n task_id=\"get_api_data\",\r\n python_callable=get_api_data\r\n )\r\n\r\n send_message_telegram_task = TelegramOperator(\r\n task_id='send_message_telegram',\r\n telegram_conn_id='telegram_conn_id',\r\n text='Dados disponiveis no banco!',\r\n dag=dag,\r\n )\r\n\r\n is_forex_rates_available >> get_api_data_task >> send_message_telegram_task\r\n\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
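A minimal sketch of the flatten-and-persist step in get_api_data/save_api_data above, assuming an in-memory SQLite engine in place of the Airflow DATABASE_CONN variable; the payload is made up, and the schema='public' argument is dropped because SQLite has no schemas.

import pandas as pd
from sqlalchemy import create_engine

json_response = {"rates": {"USD": 1.18, "GBP": 0.86}, "base": "EUR", "date": "2021-01-01"}
rates_dict = json_response["rates"]
rates_dict.update({"base": json_response["base"], "date": json_response["date"]})
rates_df = pd.DataFrame([rates_dict])  # one row: one column per currency plus base/date

engine = create_engine("sqlite://", echo=False)  # stand-in connection string
rates_df.to_sql(name="Forex", con=engine, if_exists="append", index=False)
print(pd.read_sql("SELECT * FROM Forex", engine))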
konstantinos-p/PAC_Bayesian_Generalization
|
[
"c3ab6cdcfda4dcbdb9c5fb073f29490cde511b13"
] |
[
"utils_spectral_norm.py"
] |
[
"\"\"\"\nIn this script we have functions that calculate the spectral norm for different keras layers.\nSpecifically we can calculate the spectral norm of\n1) convolutional\n2) locally connected layers\n. This is a difficult problem as the full operators are huge sparse matrices.\n\"\"\"\n\nimport numpy as np\nfrom keras.callbacks import Callback\n\ndef compute_spectral_norm_of_conv(weights,featuremap_dim):\n # The dimensions of the weight matrix are\n # trainable_weights[0] = (filter_d1,filter_d2,input_channels,output_channels) This is the kernel.\n # trainable_weights[1] = (output_channels) This is the bias.\n\n dim1 = weights.shape[0]\n dim2 = weights.shape[1]\n dim3 = weights.shape[2]\n dim4 = weights.shape[3]\n\n fourier_weights = np.empty((featuremap_dim*featuremap_dim,dim3,dim4),dtype='cfloat')\n tmp_featuremap = np.zeros((featuremap_dim,featuremap_dim))\n\n\n for i in range(0,dim3):\n for j in range(0,dim4):\n tmp_featuremap[0:dim1,0:dim2] = weights[:,:,i,j]\n fourier_weights[:,i,j] = np.reshape(np.fft.fft2(tmp_featuremap),-1)\n\n max_l2 = 0\n\n for i in range(0,featuremap_dim*featuremap_dim):\n if np.linalg.norm(fourier_weights[i,:,:],2)>max_l2:\n max_l2 = np.linalg.norm(fourier_weights[i,:,:],2)\n return max_l2\n\ndef compute_spectral_norm_of_locally_connected(weights,featuremap_dim,epsilon,max_it,hta):\n # The dimensions of the weight matrix are\n # trainable_weights[0] = (filter_d1,filter_d2,input_channels,output_channels) This is the kernel.\n # trainable_weights[1] = (output_channels) This is the bias.\n\n dim1 = weights.shape[0]\n dim2 = weights.shape[1]\n dim3 = weights.shape[2]\n dim4 = weights.shape[3]\n\n flat_maps = np.zeros((featuremap_dim*dim3*dim4,1))\n\n w = np.random.normal(0,1,size=(featuremap_dim*dim3*dim4,1))\n w = w/np.linalg.norm(w)\n\n singular_value_current = 0\n singular_value = np.zeros((max_it,1))\n\n for i in range(0,max_it):\n\n # Create a convolutional filter map\n featuremaps = np.zeros((featuremap_dim, featuremap_dim, dim3, dim4))\n filter_loc = np.random.randint(1,featuremap_dim-1,size=(2))\n featuremaps[filter_loc[0]-1:filter_loc[0]+1,filter_loc[1]-1:filter_loc[1]+1,:,:] = weights\n flat_maps = featuremaps[:]\n\n w = w + hta*flat_maps*(flat_maps.T@w)\n w = w / np.linalg.norm(w)\n\n singular_value[i] = np.power(flat_maps.T@w,2)\n\n if np.abs(singular_value[i]-singular_value_current)>epsilon:\n singular_value_current = singular_value[i]\n else:\n break\n return singular_value\n\n#Create Parseval Regularisation\nclass convParsevalReg(Callback):\n\n def __init__(self,layer_name,beta_par):\n self.validation_data = None\n self.model = None\n self.layername = layer_name\n self.beta = beta_par\n\n def on_batch_end(self, batch, logs={}):\n\n #This is an implementation of Parseval regularisation for convolutional layers\n\n # The dimensions of the weight matrix are\n # trainable_weights[0] = (filter_d1,filter_d2,input_channels,output_channels) This is the kernel.\n # trainable_weights[1] = (output_channels) This is the bias.\n\n beta_param = self.beta\n conv_layer = self.model.get_layer(self.layername)\n weights = conv_layer.get_weights()[0]\n bias = conv_layer.get_weights()[1]\n\n weights_swapped = np.transpose(weights,[0,1,3,2])\n\n dim1 = weights_swapped.shape[0]\n dim2 = weights_swapped.shape[1]\n dim3 = weights_swapped.shape[2]\n dim4 = weights_swapped.shape[3]\n\n vector_weights = np.reshape(weights_swapped,(-1,weights_swapped.shape[3]))\n vector_weights = (1+beta_param)*vector_weights-beta_param*vector_weights@vector_weights.T@vector_weights\n\n 
\"\"\"\n print('\\n')\n print('The spectral norm of the unfolded weight matrix is: ', np.linalg.norm(vector_weights,2))\n\n print('\\n')\n print('The Frobenius norm of the unfolded weight matrix is: ', np.linalg.norm(vector_weights,'fro'))\n \"\"\"\n\n weights_swapped = np.reshape(vector_weights,(dim1,dim2,dim3,dim4))\n weights = np.transpose(weights_swapped,[0,1,3,2])\n\n conv_layer.set_weights([weights,bias])\n\nclass denseParsevalReg(Callback):\n\n def __init__(self,layer_name,beta_par,subsampling):\n self.validation_data = None\n self.model = None\n self.layername = layer_name\n self.beta = beta_par\n self.sub = subsampling\n\n def on_batch_end(self, batch, logs={}):\n\n #This is an implementation of Parseval regularisation for dense layers\n\n beta_param = self.beta\n subsampling_size = self.sub\n dense_layer = self.model.get_layer(self.layername)\n dense_weights = dense_layer.get_weights()[0]\n bias = dense_layer.get_weights()[1]\n\n rand_rows = np.random.permutation(dense_weights.shape[0])\n\n dense_weights_subsampled = dense_weights[rand_rows[0:subsampling_size],:]\n\n dense_weights_subsampled = (1+beta_param)*dense_weights_subsampled-beta_param*dense_weights_subsampled@dense_weights_subsampled.T@dense_weights_subsampled\n\n dense_weights[rand_rows[0:subsampling_size], :] = dense_weights_subsampled\n\n dense_layer.set_weights([dense_weights,bias])\n\nclass localParsevalReg(Callback):\n\n def __init__(self,layer_name,beta_par):\n self.validation_data = None\n self.model = None\n self.layername = layer_name\n self.beta = beta_par\n\n def on_batch_end(self, batch, logs={}):\n\n #This is an implementation of Parseval regularisation for convolutional layers\n\n # The dimensions of the weight matrix are\n # trainable_weights[0] = (filter_d1,filter_d2,input_channels,output_channels) This is the kernel.\n # trainable_weights[1] = (output_channels) This is the bias.\n\n beta_param = self.beta\n local_layer = self.model.get_layer(self.layername)\n weights = local_layer.get_weights()[0]\n bias = local_layer.get_weights()[1]\n\n dim1 = weights.shape[0]\n dim2 = weights.shape[1]\n dim3 = weights.shape[2]\n\n for i in range(0,dim1):\n vector_weights = weights[i,:,:]\n vector_weights = (1+beta_param)*vector_weights-beta_param*vector_weights@vector_weights.T@vector_weights\n weights[i, :, :] = vector_weights\n\n\n #print('\\n')\n #print('The spectral norm of the unfolded weight matrix is: ', np.linalg.norm(vector_weights,2))\n\n\n local_layer.set_weights([weights,bias])"
] |
[
[
"numpy.fft.fft2",
"numpy.abs",
"numpy.power",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.transpose",
"numpy.zeros",
"numpy.empty",
"numpy.random.randint"
]
] |
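The FFT argument behind compute_spectral_norm_of_conv above can be sanity-checked in the one-input/one-output-channel case, where the circular-convolution operator is doubly circulant and its singular values are the magnitudes of the 2-D DFT of the zero-padded kernel. A minimal sketch, with an arbitrary 8x8 feature map and 3x3 kernel:

import numpy as np

n = 8                                  # feature-map side length
kernel = np.random.randn(3, 3)
padded = np.zeros((n, n))
padded[:3, :3] = kernel

# spectral norm via the Fourier trick used above
sigma_fft = np.abs(np.fft.fft2(padded)).max()

# spectral norm via the explicit (n*n x n*n) circular-convolution operator
op = np.zeros((n * n, n * n))
for i in range(n):
    for j in range(n):
        op[i * n + j] = np.roll(np.roll(padded, i, axis=0), j, axis=1).ravel()
sigma_dense = np.linalg.norm(op, 2)

print(sigma_fft, sigma_dense)          # should agree to numerical precision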
SoftServeInc/yield-paper
|
[
"cbf9710e6866841e231e3f851786a4d7dc4a5c72"
] |
[
"chemprop/features/featurization.py"
] |
[
"from argparse import Namespace\nfrom typing import List, Tuple, Union\n\nfrom rdkit import Chem\nimport torch\n\nfrom chemprop.mol_utils import str_to_mol\n\n# Atom feature sizes\nMAX_ATOMIC_NUM = 100\nATOM_FEATURES = {\n 'atomic_num': list(range(MAX_ATOMIC_NUM)),\n 'degree': [0, 1, 2, 3, 4, 5],\n 'formal_charge': [-1, -2, 1, 2, 0],\n 'chiral_tag': [0, 1, 2, 3],\n 'num_Hs': [0, 1, 2, 3, 4],\n 'hybridization': [\n Chem.rdchem.HybridizationType.SP,\n Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3,\n Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2\n ],\n}\n\n# Distance feature sizes\nPATH_DISTANCE_BINS = list(range(10))\nTHREE_D_DISTANCE_MAX = 20\nTHREE_D_DISTANCE_STEP = 1\nTHREE_D_DISTANCE_BINS = list(range(0, THREE_D_DISTANCE_MAX + 1, THREE_D_DISTANCE_STEP))\n\n# len(choices) + 1 to include room for uncommon values; + 2 for IsAromatic and mass; +8 for ring membership\nATOM_FDIM = sum(len(choices) + 1 for choices in ATOM_FEATURES.values()) + 2 + 8\nBOND_FDIM = 14 + 8\n\n# Memoization\nSMILES_TO_GRAPH = {}\n\n\ndef clear_cache():\n \"\"\"Clears featurization cache.\"\"\"\n global SMILES_TO_GRAPH\n SMILES_TO_GRAPH = {}\n\n\ndef get_atom_fdim(args: Namespace) -> int:\n \"\"\"\n Gets the dimensionality of atom features.\n\n :param: Arguments.\n \"\"\"\n return ATOM_FDIM\n\n\ndef get_bond_fdim(args: Namespace) -> int:\n \"\"\"\n Gets the dimensionality of bond features.\n\n :param: Arguments.\n \"\"\"\n return BOND_FDIM\n\n\ndef onek_encoding_unk(value: int, choices: List[int]) -> List[int]:\n \"\"\"\n Creates a one-hot encoding.\n\n :param value: The value for which the encoding should be one.\n :param choices: A list of possible values.\n :return: A one-hot encoding of the value in a list of length len(choices) + 1.\n If value is not in the list of choices, then the final element in the encoding is 1.\n \"\"\"\n encoding = [0] * (len(choices) + 1)\n index = choices.index(value) if value in choices else -1\n encoding[index] = 1\n\n return encoding\n\n\ndef atom_features(atom: Chem.rdchem.Atom, functional_groups: List[int] = None) -> List[Union[bool, int, float]]:\n \"\"\"\n Builds a feature vector for an atom.\n\n :param atom: An RDKit atom.\n :param functional_groups: A k-hot vector indicating the functional groups the atom belongs to.\n :return: A list containing the atom features.\n \"\"\"\n features = onek_encoding_unk(atom.GetAtomicNum() - 1, ATOM_FEATURES['atomic_num']) + \\\n onek_encoding_unk(atom.GetTotalDegree(), ATOM_FEATURES['degree']) + \\\n onek_encoding_unk(atom.GetFormalCharge(), ATOM_FEATURES['formal_charge']) + \\\n onek_encoding_unk(int(atom.GetChiralTag()), ATOM_FEATURES['chiral_tag']) + \\\n onek_encoding_unk(int(atom.GetTotalNumHs()), ATOM_FEATURES['num_Hs']) + \\\n onek_encoding_unk(int(atom.GetHybridization()), ATOM_FEATURES['hybridization']) + \\\n [1 if atom.GetIsAromatic() else 0] + \\\n [atom.GetMass() * 0.01] # scaled to about the same range as other features\n features += [\n atom.IsInRingSize(3),\n atom.IsInRingSize(4),\n atom.IsInRingSize(5),\n atom.IsInRingSize(6),\n atom.IsInRingSize(7),\n atom.IsInRingSize(8),\n atom.IsInRingSize(9),\n atom.IsInRingSize(10),\n ]\n if functional_groups is not None:\n features += functional_groups\n return features\n\n\ndef bond_features(bond: Chem.rdchem.Bond) -> List[Union[bool, int, float]]:\n \"\"\"\n Builds a feature vector for a bond.\n\n :param bond: A RDKit bond.\n :return: A list containing the bond features.\n \"\"\"\n if bond is None:\n fbond = [1] + [0] * (BOND_FDIM - 
1)\n else:\n bt = bond.GetBondType()\n fbond = [\n 0, # bond is not None\n bt == Chem.rdchem.BondType.SINGLE,\n bt == Chem.rdchem.BondType.DOUBLE,\n bt == Chem.rdchem.BondType.TRIPLE,\n bt == Chem.rdchem.BondType.AROMATIC,\n (bond.GetIsConjugated() if bt is not None else 0),\n (bond.IsInRing() if bt is not None else 0),\n (bond.IsInRingSize(3) if bt is not None else 0),\n (bond.IsInRingSize(4) if bt is not None else 0),\n (bond.IsInRingSize(5) if bt is not None else 0),\n (bond.IsInRingSize(6) if bt is not None else 0),\n (bond.IsInRingSize(7) if bt is not None else 0),\n (bond.IsInRingSize(8) if bt is not None else 0),\n (bond.IsInRingSize(9) if bt is not None else 0),\n (bond.IsInRingSize(10) if bt is not None else 0),\n ]\n fbond += onek_encoding_unk(int(bond.GetStereo()), list(range(6)))\n return fbond\n\n\nclass MolGraph:\n \"\"\"\n A MolGraph represents the graph structure and featurization of a single molecule.\n\n A MolGraph computes the following attributes:\n - smiles: Smiles string.\n - n_atoms: The number of atoms in the molecule.\n - n_bonds: The number of bonds in the molecule.\n - f_atoms: A mapping from an atom index to a list atom features.\n - f_bonds: A mapping from a bond index to a list of bond features.\n - a2b: A mapping from an atom index to a list of incoming bond indices.\n - b2a: A mapping from a bond index to the index of the atom the bond originates from.\n - b2revb: A mapping from a bond index to the index of the reverse bond.\n \"\"\"\n\n def __init__(self, smiles: str, args: Namespace):\n \"\"\"\n Computes the graph structure and featurization of a molecule.\n\n :param smiles: A smiles string.\n :param args: Arguments.\n \"\"\"\n self.smiles = smiles\n self.n_atoms = 0 # number of atoms\n self.n_bonds = 0 # number of bonds\n self.f_atoms = [] # mapping from atom index to atom features\n self.f_bonds = [] # mapping from bond index to concat(in_atom, bond) features\n self.a2b = [] # mapping from atom index to incoming bond indices\n self.b2a = [] # mapping from bond index to the index of the atom the bond is coming from\n self.b2revb = [] # mapping from bond index to the index of the reverse bond\n\n # Convert smiles to molecule\n mol = str_to_mol(smiles, explicit_hydrogens=args.explicit_hydrogens)\n\n # fake the number of \"atoms\" if we are collapsing substructures\n self.n_atoms = mol.GetNumAtoms()\n\n # Require atom map numbers when using reactions\n if args.reaction:\n if any(a.GetAtomMapNum() == 0 for a in mol.GetAtoms()):\n raise Exception(f'{smiles} is missing atom map numbers')\n\n # Ensure that atoms in reactant and product are sorted in the same way\n atoms = sorted(mol.GetAtoms(), key=lambda a: a.GetAtomMapNum())\n else:\n atoms = mol.GetAtoms()\n \n # Get atom features\n for i, atom in enumerate(atoms):\n self.f_atoms.append(atom_features(atom))\n self.f_atoms = [self.f_atoms[i] for i in range(self.n_atoms)]\n\n for _ in range(self.n_atoms):\n self.a2b.append([])\n\n # Get bond features\n for a1 in range(self.n_atoms):\n for a2 in range(a1 + 1, self.n_atoms):\n rdkit_idx1 = atoms[a1].GetIdx()\n rdkit_idx2 = atoms[a2].GetIdx()\n bond = mol.GetBondBetweenAtoms(rdkit_idx1, rdkit_idx2)\n\n if bond is None:\n continue\n\n f_bond = bond_features(bond)\n\n if args.atom_messages:\n self.f_bonds.append(f_bond)\n self.f_bonds.append(f_bond)\n else:\n self.f_bonds.append(self.f_atoms[a1] + f_bond)\n self.f_bonds.append(self.f_atoms[a2] + f_bond)\n\n # Update index mappings\n b1 = self.n_bonds\n b2 = b1 + 1\n self.a2b[a2].append(b1) # b1 = a1 --> a2\n 
self.b2a.append(a1)\n self.a2b[a1].append(b2) # b2 = a2 --> a1\n self.b2a.append(a2)\n self.b2revb.append(b2)\n self.b2revb.append(b1)\n self.n_bonds += 2\n\n\nclass BatchMolGraph:\n \"\"\"\n A BatchMolGraph represents the graph structure and featurization of a batch of molecules.\n\n A BatchMolGraph contains the attributes of a MolGraph plus:\n - smiles_batch: A list of smiles strings.\n - n_mols: The number of molecules in the batch.\n - atom_fdim: The dimensionality of the atom features.\n - bond_fdim: The dimensionality of the bond features (technically the combined atom/bond features).\n - a_scope: A list of tuples indicating the start and end atom indices for each molecule.\n - b_scope: A list of tuples indicating the start and end bond indices for each molecule.\n - max_num_bonds: The maximum number of bonds neighboring an atom in this batch.\n - b2b: (Optional) A mapping from a bond index to incoming bond indices.\n - a2a: (Optional): A mapping from an atom index to neighboring atom indices.\n \"\"\"\n\n def __init__(self, mol_graphs: List[MolGraph], args: Namespace):\n self.smiles_batch = [mol_graph.smiles for mol_graph in mol_graphs]\n self.n_mols = len(self.smiles_batch)\n\n self.atom_fdim = get_atom_fdim(args)\n self.bond_fdim = get_bond_fdim(args) + (not args.atom_messages) * self.atom_fdim\n\n # Start n_atoms and n_bonds at 1 b/c zero padding\n self.n_atoms = 1 # number of atoms (start at 1 b/c need index 0 as padding)\n self.n_bonds = 1 # number of bonds (start at 1 b/c need index 0 as padding)\n self.a_scope = [] # list of tuples indicating (start_atom_index, num_atoms) for each molecule\n self.b_scope = [] # list of tuples indicating (start_bond_index, num_bonds) for each molecule\n\n # All start with zero padding so that indexing with zero padding returns zeros\n f_atoms = [[0] * self.atom_fdim] # atom features\n f_bonds = [[0] * self.bond_fdim] # combined atom/bond features\n a2b = [[]] # mapping from atom index to incoming bond indices\n b2a = [0] # mapping from bond index to the index of the atom the bond is coming from\n b2revb = [0] # mapping from bond index to the index of the reverse bond\n for mol_graph in mol_graphs:\n f_atoms.extend(mol_graph.f_atoms)\n f_bonds.extend(mol_graph.f_bonds)\n\n for a in range(mol_graph.n_atoms):\n a2b.append([b + self.n_bonds for b in mol_graph.a2b[a]])\n\n for b in range(mol_graph.n_bonds):\n b2a.append(self.n_atoms + mol_graph.b2a[b])\n b2revb.append(self.n_bonds + mol_graph.b2revb[b])\n\n self.a_scope.append((self.n_atoms, mol_graph.n_atoms))\n self.b_scope.append((self.n_bonds, mol_graph.n_bonds))\n self.n_atoms += mol_graph.n_atoms\n self.n_bonds += mol_graph.n_bonds\n\n self.max_num_bonds = max(1, max(len(in_bonds) for in_bonds in a2b)) # max with 1 to fix a crash in rare case of all single-heavy-atom mols\n\n self.f_atoms = torch.FloatTensor(f_atoms)\n self.f_bonds = torch.FloatTensor(f_bonds)\n self.a2b = torch.LongTensor([a2b[a] + [0] * (self.max_num_bonds - len(a2b[a])) for a in range(self.n_atoms)])\n self.b2a = torch.LongTensor(b2a)\n self.b2revb = torch.LongTensor(b2revb)\n self.b2b = None # try to avoid computing b2b b/c O(n_atoms^3)\n self.a2a = None # only needed if using atom messages\n\n def get_components(self) -> Tuple[torch.FloatTensor, torch.FloatTensor,\n torch.LongTensor, torch.LongTensor, torch.LongTensor,\n List[Tuple[int, int]], List[Tuple[int, int]]]:\n \"\"\"\n Returns the components of the BatchMolGraph.\n\n :return: A tuple containing PyTorch tensors with the atom features, bond features, and graph 
structure\n        and two lists indicating the scope of the atoms and bonds (i.e. which molecules they belong to).\n        \"\"\"\n        return self.f_atoms, self.f_bonds, self.a2b, self.b2a, self.b2revb, self.a_scope, self.b_scope\n\n    def get_b2b(self) -> torch.LongTensor:\n        \"\"\"\n        Computes (if necessary) and returns a mapping from each bond index to all the incoming bond indices.\n\n        :return: A PyTorch tensor containing the mapping from each bond index to all the incoming bond indices.\n        \"\"\"\n\n        if self.b2b is None:\n            b2b = self.a2b[self.b2a]  # num_bonds x max_num_bonds\n            # b2b includes reverse edge for each bond so need to mask out\n            revmask = (b2b != self.b2revb.unsqueeze(1).repeat(1, b2b.size(1))).long()  # num_bonds x max_num_bonds\n            self.b2b = b2b * revmask\n\n        return self.b2b\n\n    def get_a2a(self) -> torch.LongTensor:\n        \"\"\"\n        Computes (if necessary) and returns a mapping from each atom index to all neighboring atom indices.\n\n        :return: A PyTorch tensor containing the mapping from each atom index to all the neighboring atom indices.\n        \"\"\"\n        if self.a2a is None:\n            # b = a1 --> a2\n            # a2b maps a2 to all incoming bonds b\n            # b2a maps each bond b to the atom it comes from a1\n            # thus b2a[a2b] maps atom a2 to neighboring atoms a1\n            self.a2a = self.b2a[self.a2b]  # num_atoms x max_num_bonds\n\n        return self.a2a\n\n\ndef mol2graph(smiles_batch: List[str],\n              args: Namespace) -> BatchMolGraph:\n    \"\"\"\n    Converts a list of SMILES strings to a BatchMolGraph containing the batch of molecular graphs.\n\n    :param smiles_batch: A list of SMILES strings.\n    :param args: Arguments.\n    :return: A BatchMolGraph containing the combined molecular graph for the molecules.\n    \"\"\"\n    mol_graphs = []\n    for smiles in smiles_batch:\n        if smiles in SMILES_TO_GRAPH:\n            mol_graph = SMILES_TO_GRAPH[smiles]\n        else:\n            mol_graph = MolGraph(smiles, args)\n            if not args.no_cache:\n                SMILES_TO_GRAPH[smiles] = mol_graph\n        mol_graphs.append(mol_graph)\n    \n    return BatchMolGraph(mol_graphs, args)\n"
] |
[
[
"torch.LongTensor",
"torch.FloatTensor"
]
] |
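The b2a[a2b] composition in get_a2a above, traced by hand on a 3-atom chain 0-1-2; the index tables below follow the zero-padding convention of BatchMolGraph (index 0 is the padding slot, so atoms and bonds are 1-indexed) but are hand-built for illustration.

import torch

# bonds: 1 = atom0->atom1, 2 = atom1->atom0, 3 = atom1->atom2, 4 = atom2->atom1
b2a = torch.LongTensor([0, 1, 2, 2, 3])   # bond index -> source-atom index
a2b = torch.LongTensor([[0, 0],           # padding atom
                        [2, 0],           # atom0 receives bond 2 (atom1->atom0)
                        [1, 4],           # atom1 receives bonds 1 and 4
                        [3, 0]])          # atom2 receives bond 3 (atom1->atom2)

a2a = b2a[a2b]                            # atom index -> neighbouring atom indices
print(a2a)  # tensor([[0, 0], [2, 0], [1, 3], [2, 0]])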
johnsonkee/mmocr_wxz
|
[
"867b91664ea373873557743c2a4be23fa014b14f"
] |
[
"mmocr/datasets/icdar_dataset.py"
] |
[
"import numpy as np\nfrom pycocotools.coco import COCO\n\nimport mmocr.utils as utils\nfrom mmdet.datasets.builder import DATASETS\nfrom mmdet.datasets.coco import CocoDataset\nfrom mmocr.core.evaluation.hmean import eval_hmean\nimport pdb\n\n\[email protected]_module()\nclass IcdarDataset(CocoDataset):\n CLASSES = ('text')\n\n def __init__(self,\n ann_file,\n pipeline,\n classes=None,\n data_root=None,\n img_prefix='',\n seg_prefix=None,\n proposal_file=None,\n test_mode=False,\n filter_empty_gt=True,\n select_first_k=-1):\n # select first k images for fast debugging.\n self.select_first_k = select_first_k\n\n super().__init__(ann_file, pipeline, classes, data_root, img_prefix,\n seg_prefix, proposal_file, test_mode, filter_empty_gt)\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n \"\"\"\n\n self.coco = COCO(ann_file)\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n\n count = 0\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info['filename'] = info['file_name']\n data_infos.append(info)\n count = count + 1\n if count > self.select_first_k and self.select_first_k > 0:\n break\n return data_infos\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\n labels, masks, masks_ignore, seg_map. \"masks\" and\n \"masks_ignore\" are represented by polygon boundary\n point sequences.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ignore = []\n gt_masks_ann = []\n\n for ann in ann_info:\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n gt_masks_ignore.append(ann.get(\n 'segmentation', None)) # to float32 for latter processing\n\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann.get('segmentation', None))\n # pdb.set_trace()\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann\n\n def evaluate(self,\n results,\n metric='hmean-iou',\n logger=None,\n score_thr=0.3,\n rank_list=None,\n **kwargs):\n \"\"\"Evaluate the hmean metric.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. 
Default: None.\n rank_list (str): json file used to save eval result\n of each image after ranking.\n Returns:\n dict[dict[str: float]]: The evaluation results.\n \"\"\"\n assert utils.is_type_list(results, dict)\n\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['hmean-iou', 'hmean-ic13']\n metrics = set(metrics) & set(allowed_metrics)\n\n img_infos = []\n ann_infos = []\n for i in range(len(self)):\n img_info = {'filename': self.data_infos[i]['file_name']}\n img_infos.append(img_info)\n ann_infos.append(self.get_ann_info(i))\n\n eval_results = eval_hmean(\n results,\n img_infos,\n ann_infos,\n metrics=metrics,\n score_thr=score_thr,\n logger=logger,\n rank_list=rank_list)\n\n return eval_results\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
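The bbox handling in _parse_ann_info above boils down to an xywh-to-xyxy conversion with degenerate boxes filtered out; a minimal sketch on two made-up annotations:

import numpy as np

ann_info = [{"bbox": [10, 20, 30, 40], "area": 1200},
            {"bbox": [5, 5, 0.5, 9], "area": 4.5}]   # w < 1, so it is filtered out

gt_bboxes = []
for ann in ann_info:
    x1, y1, w, h = ann["bbox"]
    if ann["area"] <= 0 or w < 1 or h < 1:
        continue
    gt_bboxes.append([x1, y1, x1 + w, y1 + h])

gt_bboxes = np.array(gt_bboxes, dtype=np.float32) if gt_bboxes else np.zeros((0, 4), np.float32)
print(gt_bboxes)  # [[10. 20. 40. 60.]]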
lhf-labs/finance-news-analysis-bert
|
[
"8e3d1b2eb2a2b7390d53506329775fe83656c498"
] |
[
"src/bin/main_baseline_lstm_test.py"
] |
[
"import os\nimport git\nimport time\nimport torch\nimport logging\nimport argparse\nimport numpy as np\nfrom controller.data import load_data_test\nfrom controller.model import prepare_device, prepare_preliminary, train_model, test_model\nfrom model.baseline_lstm_classifier import BaselineLSTMClassifier\n\n\"\"\"\nTrain and test a given model.\n\"\"\"\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Global parameters\n parser.add_argument('data_path', help=\"Path to data.\", type=str)\n parser.add_argument('experiment_path', help=\"Path to experiment.\", type=str)\n parser.add_argument('dataset', help='Dataset to be computed.', type=str, choices=[\"finance\", \"phrasebank\"])\n parser.add_argument('checkpoint_path', type=str)\n parser.add_argument('--experiment-name', help=\"Experiment name.\", type=str, default='baseline_lstm')\n\n # Model architecture parameters\n parser.add_argument('--vocab-size', help=\"Max vocabulary size.\", type=int, default=40000)\n parser.add_argument('--embedding-size', help=\"Embedding size of the model.\", type=int, default=1024)\n parser.add_argument('--number-rnn-layers', help=\"Number of RNN layers of the model.\", type=int, default=2)\n parser.add_argument('--bidirectional', help=\"Use bidirectional RNN.\", action='store_true')\n parser.add_argument('--number-layers', help=\"Number of layers of the model.\", type=int, default=3)\n parser.add_argument('--layer-size', help=\"Layer start size of the model.\", type=int, default=512)\n parser.add_argument('--minimum-layer-size', help=\"Layer minimum size of the model.\", type=int, default=8)\n parser.add_argument('--dropout-rate', help=\"Dropout that is applied to the model.\", type=int, default=0.2)\n\n # Model training parameters\n parser.add_argument('--seed', help=\"Seed to be used for randomization purposes.\", type=int, default=42)\n parser.add_argument('--batch-size', help=\"Batch size to feed the model.\", type=int, default=32)\n parser.add_argument('--optimizer', help=\"Optimizer for training the model.\", type=str, choices=['adam'],\n default='adam')\n parser.add_argument('--lr', help=\"Learning rate for training the model.\", type=float, default=0.001)\n parser.add_argument('--no-cuda', help=\"Avoid using cuda.\", action='store_true')\n args = parser.parse_args()\n\n # Experiments directory\n repo = git.Repo(search_parent_directories=True)\n sha = repo.head.object.hexsha\n ts = time.time()\n experiment_directory = os.path.join(args.experiment_path, f'{args.experiment_name}_{ts}_{sha[:7]}')\n os.makedirs(experiment_directory, exist_ok=True)\n\n # Logger\n logging.basicConfig(filename=os.path.join(experiment_directory, 'process.log'), level=logging.INFO)\n logging.getLogger('').addHandler(logging.StreamHandler())\n\n # Seeds\n torch.manual_seed(args.seed)\n if not args.no_cuda:\n torch.backends.cudnn.deterministic = True\n # torch.set_deterministic(True)\n np.random.seed(args.seed)\n\n # Load data\n data_loader_train, data_loader_valid, data_loader_test = load_data_test(dataset=args.dataset, path=args.data_path,\n batch_size=args.batch_size)\n\n # Build classifier\n device = prepare_device(args.no_cuda)\n classifier = BaselineLSTMClassifier(device=device, vocab_size=args.vocab_size, embedding_size=args.embedding_size,\n bidirectional=args.bidirectional, number_rnn=args.number_rnn_layers,\n number_layers=args.number_layers, layer_size=args.layer_size,\n minimum_layer_size=args.minimum_layer_size,\n dropout_rate=args.dropout_rate)\n classifier.to(device)\n\n # Train and test\n 
criterion, optimizer = prepare_preliminary(args, classifier)\n classifier.load_state_dict(torch.load(os.path.join(args.checkpoint_path, 'checkpoint_best.pt')))\n test_model(model=classifier, dataset=args.dataset, device=device, data_loader_test=data_loader_test,\n criterion=criterion)\n"
] |
[
[
"torch.manual_seed",
"numpy.random.seed"
]
] |
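The seeding block at the top of the script above, gathered into a single helper; a sketch in which seed_everything is a name chosen here, not one the repo defines.

import numpy as np
import torch

def seed_everything(seed: int, cuda: bool = True) -> None:
    torch.manual_seed(seed)
    np.random.seed(seed)
    if cuda and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True  # trades speed for determinism

seed_everything(42)
print(torch.rand(2), np.random.rand(2))  # identical on every run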
radix-ai/agoro-field-boundary-detector
|
[
"9dd911df096ce865471ed0330174044f4172cc66"
] |
[
"src/agoro_field_boundary_detector/field_detection/mask_rcnn/train.py"
] |
[
"\"\"\"PyTorch Detection Training, code from https://github.com/pytorch/vision.\"\"\"\nimport datetime\nimport os\nimport time\nfrom typing import Any\n\nimport torch\nimport torch.utils.data\nimport torchvision\nimport torchvision.models.detection\nimport torchvision.models.detection.mask_rcnn\nfrom group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups\n\nimport src.agoro_field_boundary_detector.field_detection.mask_rcnn.presets as presets\nimport src.agoro_field_boundary_detector.field_detection.mask_rcnn.utils as utils\nfrom agoro_field_boundary_detector.field_detection.mask_rcnn.coco_utils import get_coco, get_coco_kp\nfrom agoro_field_boundary_detector.field_detection.mask_rcnn.engine import evaluate, train_one_epoch\n\n\ndef get_dataset(name: Any, image_set: Any, transform: Any, data_path: Any) -> Any:\n \"\"\"Get the COCO dataset.\"\"\"\n paths = {\"coco\": (data_path, get_coco, 91), \"coco_kp\": (data_path, get_coco_kp, 2)}\n p, ds_fn, num_classes = paths[name]\n\n ds = ds_fn(p, image_set=image_set, transforms=transform) # type: ignore\n return ds, num_classes\n\n\ndef get_transform(train: Any) -> Any:\n \"\"\"Transform the presets.\"\"\"\n return presets.DetectionPresetTrain() if train else presets.DetectionPresetEval()\n\n\ndef main(args: Any) -> None: # noqa C901\n utils.init_distributed_mode(args)\n print(args)\n\n device = torch.device(args.device) # type: ignore\n\n # Data loading code\n print(\"Loading data\")\n dataset, num_classes = get_dataset(\n args.dataset, \"train\", get_transform(train=True), args.data_path\n )\n dataset_test, _ = get_dataset(args.dataset, \"val\", get_transform(train=False), args.data_path)\n\n print(\"Creating data loaders\")\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) # type: ignore\n test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) # type: ignore\n else:\n train_sampler = torch.utils.data.RandomSampler(dataset) # type: ignore\n test_sampler = torch.utils.data.SequentialSampler(dataset_test) # type: ignore\n\n if args.aspect_ratio_group_factor >= 0:\n group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)\n train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)\n else:\n train_batch_sampler = torch.utils.data.BatchSampler(\n train_sampler, args.batch_size, drop_last=True\n )\n\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.workers,\n collate_fn=utils.collate_fn,\n )\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size=1,\n sampler=test_sampler,\n num_workers=args.workers,\n collate_fn=utils.collate_fn,\n )\n\n print(\"Creating model\")\n kwargs = {\"trainable_backbone_layers\": args.trainable_backbone_layers}\n if \"rcnn\" in args.model:\n if args.rpn_score_thresh is not None:\n kwargs[\"rpn_score_thresh\"] = args.rpn_score_thresh\n model = torchvision.models.detection.__dict__[args.model](\n num_classes=num_classes, pretrained=args.pretrained, **kwargs\n )\n model.to(device)\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(\n params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay\n )\n\n # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 
step_size=args.lr_step_size, gamma=args.lr_gamma)\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=args.lr_steps, gamma=args.lr_gamma\n )\n\n if args.resume:\n checkpoint = torch.load(args.resume, map_location=\"cpu\") # type: ignore\n model_without_ddp.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n lr_scheduler.load_state_dict(checkpoint[\"lr_scheduler\"])\n args.start_epoch = checkpoint[\"epoch\"] + 1\n\n if args.test_only:\n evaluate(model, data_loader_test, device=device)\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)\n lr_scheduler.step()\n if args.output_dir:\n utils.save_on_master(\n {\n \"model\": model_without_ddp.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"lr_scheduler\": lr_scheduler.state_dict(),\n \"args\": args,\n \"epoch\": epoch,\n },\n os.path.join(args.output_dir, f\"model_{epoch}.pth\"),\n )\n\n # evaluate after every epoch\n evaluate(model, data_loader_test, device=device)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print(f\"Training time {total_time_str}\")\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--data-path\", default=\"/datasets01/COCO/022719/\", help=\"dataset\")\n parser.add_argument(\"--dataset\", default=\"coco\", help=\"dataset\")\n parser.add_argument(\"--model\", default=\"maskrcnn_resnet50_fpn\", help=\"model\")\n parser.add_argument(\"--device\", default=\"cuda\", help=\"device\") # TODO: Detected later on?\n parser.add_argument(\n \"-b\",\n \"--batch-size\",\n default=1,\n type=int,\n help=\"images per gpu, the total batch size is $NGPU x batch_size\",\n )\n parser.add_argument(\n \"--epochs\", default=26, type=int, metavar=\"N\", help=\"number of total epochs to run\"\n )\n parser.add_argument(\n \"-j\",\n \"--workers\",\n default=0,\n type=int,\n metavar=\"N\",\n help=\"number of data loading workers (default: 0)\",\n )\n parser.add_argument(\n \"--lr\",\n default=0.02,\n type=float,\n help=\"initial learning rate, 0.02 is the default value for training \"\n \"on 8 gpus and 2 images_per_gpu\",\n )\n parser.add_argument(\"--momentum\", default=0.9, type=float, metavar=\"M\", help=\"momentum\")\n parser.add_argument(\n \"--wd\",\n \"--weight-decay\",\n default=1e-4,\n type=float,\n metavar=\"W\",\n help=\"weight decay (default: 1e-4)\",\n dest=\"weight_decay\",\n )\n parser.add_argument(\n \"--lr-step-size\", default=8, type=int, help=\"decrease lr every step-size epochs\"\n )\n parser.add_argument(\n \"--lr-steps\",\n default=[16, 22],\n nargs=\"+\",\n type=int,\n help=\"decrease lr every step-size epochs\",\n )\n parser.add_argument(\n \"--lr-gamma\", default=0.1, type=float, help=\"decrease lr by a factor of lr-gamma\"\n )\n parser.add_argument(\"--print-freq\", default=20, type=int, help=\"print frequency\")\n parser.add_argument(\"--output-dir\", default=\".\", help=\"path where to save\")\n parser.add_argument(\"--resume\", default=\"\", help=\"resume from checkpoint\")\n parser.add_argument(\"--start_epoch\", default=0, type=int, help=\"start epoch\")\n parser.add_argument(\"--aspect-ratio-group-factor\", default=3, type=int)\n parser.add_argument(\n \"--rpn-score-thresh\", default=None, 
type=float, help=\"rpn score threshold for faster-rcnn\"\n )\n parser.add_argument(\n \"--trainable-backbone-layers\",\n default=None,\n type=int,\n help=\"number of trainable layers of backbone\",\n )\n parser.add_argument(\n \"--test-only\",\n dest=\"test_only\",\n help=\"Only test the model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--pretrained\",\n dest=\"pretrained\",\n help=\"Use pre-trained models from the modelzoo\",\n action=\"store_true\",\n )\n\n # distributed training parameters\n parser.add_argument(\"--world-size\", default=1, type=int, help=\"number of distributed processes\")\n parser.add_argument(\n \"--dist-url\", default=\"env://\", help=\"url used to set up distributed training\"\n )\n\n args = parser.parse_args()\n\n if args.output_dir:\n utils.mkdir(args.output_dir)\n main(args)\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.optim.SGD",
"torch.device",
"torch.utils.data.BatchSampler"
]
] |
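The MultiStepLR schedule configured above can be traced with a dummy parameter to confirm the two 10x drops at the default milestones [16, 22]; a minimal sketch over the 26 default epochs:

import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.02, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[16, 22], gamma=0.1)

lrs = []
for epoch in range(26):
    optimizer.step()                      # a real training epoch would go here
    lrs.append(scheduler.get_last_lr()[0])
    scheduler.step()

print(lrs)  # 0.02 for epochs 0-15, 0.002 for 16-21, 0.0002 afterwards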
berkgercek/iblscripts
|
[
"c11988ff5659bcdd7cb9bf14e6589f0f7f946938"
] |
[
"ci/tests/test_ephys_pipeline.py"
] |
[
"import logging\nimport shutil\nfrom pathlib import Path\nimport numpy as np\nimport tempfile\n\nimport alf.io\nfrom ibllib.pipes import local_server\nfrom oneibl.one import ONE\n\nfrom ci.tests import base\n\nCACHE_DIR = tempfile.TemporaryDirectory()\n_logger = logging.getLogger('ibllib')\n\n\nclass TestEphysPipeline(base.IntegrationTest):\n\n def setUp(self) -> None:\n self.session_path = self.data_path.joinpath(\"ephys/choice_world/KS022/2019-12-10/001\")\n # one = ONE(base_url='http://localhost:8000')\n self.one = ONE(base_url='https://test.alyx.internationalbrainlab.org',\n username='test_user', password='TapetesBloc18',\n cache_dir=Path(CACHE_DIR.name))\n self.init_folder = self.data_path.joinpath('ephys', 'choice_world_init')\n if not self.init_folder.exists():\n return\n self.main_folder = self.data_path.joinpath('ephys', 'choice_world')\n if self.main_folder.exists():\n shutil.rmtree(self.main_folder)\n self.main_folder.mkdir(exist_ok=True)\n for ff in self.init_folder.rglob('*.*'):\n link = self.main_folder.joinpath(ff.relative_to(self.init_folder))\n if 'alf' in link.parts:\n continue\n link.parent.mkdir(exist_ok=True, parents=True)\n link.symlink_to(ff)\n self.session_path.joinpath('raw_session.flag').touch()\n\n def test_pipeline_with_alyx(self):\n \"\"\"\n Test the ephys pipeline exactly as it is supposed to run on the local servers\n :return:\n \"\"\"\n one = self.one\n # first step is to remove the session and create it anew\n eid = one.eid_from_path(self.session_path, use_cache=False)\n if eid is not None:\n one.alyx.rest('sessions', 'delete', id=eid)\n\n # create the jobs and run them\n raw_ds = local_server.job_creator(self.session_path, one=one, max_md5_size=1024 * 1024 * 20)\n eid = one.eid_from_path(self.session_path, use_cache=False)\n self.assertFalse(eid is None) # the session is created on the database\n # the flag has been erased\n self.assertFalse(self.session_path.joinpath('raw_session.flag').exists())\n\n eid = one.eid_from_path(self.session_path, use_cache=False)\n subject_path = self.session_path.parents[2]\n tasks_dict = one.alyx.rest('tasks', 'list', session=eid, status='Waiting')\n for td in tasks_dict:\n print(td['name'])\n all_datasets = local_server.tasks_runner(\n subject_path, tasks_dict, one=one, max_md5_size=1024 * 1024 * 20, count=20)\n\n # check the trajectories and probe info\n self.assertTrue(len(one.alyx.rest('insertions', 'list', session=eid)) == 2)\n self.assertTrue(len(one.alyx.rest(\n 'trajectories', 'list', session=eid, provenance='Micro-manipulator')) == 2)\n\n # check the spike sorting output on disk\n self.check_spike_sorting_output(self.session_path)\n\n # check the registration of datasets\n dsets = one.alyx.rest('datasets', 'list', session=eid)\n self.assertEqual(set([ds['url'][-36:] for ds in dsets]),\n set([ds['id'] for ds in all_datasets + raw_ds]))\n\n nss = 2\n EXPECTED_DATASETS = [('_iblqc_ephysSpectralDensity.freqs', 4, 4),\n ('_iblqc_ephysSpectralDensity.power', 4, 4),\n ('_iblqc_ephysTimeRms.rms', 4, 4),\n ('_iblqc_ephysTimeRms.timestamps', 4, 4),\n\n ('_iblrig_Camera.frame_counter', 3, 3),\n ('_iblrig_Camera.GPIO', 3, 3),\n ('_iblrig_Camera.raw', 3, 3),\n ('_iblrig_Camera.timestamps', 3, 3),\n ('_iblrig_micData.raw', 1, 1),\n\n\n ('_spikeglx_sync.channels', 2, 3),\n ('_spikeglx_sync.polarities', 2, 3),\n ('_spikeglx_sync.times', 2, 3),\n\n ('camera.times', 3, 3),\n\n ('kilosort.whitening_matrix', nss, nss),\n ('_kilosort_raw.output', nss, nss),\n ('_phy_spikes_subset.channels', nss, nss),\n ('_phy_spikes_subset.spikes', 
nss, nss),\n ('_phy_spikes_subset.waveforms', nss, nss),\n\n ('channels.localCoordinates', nss, nss),\n ('channels.rawInd', nss, nss),\n ('clusters.amps', nss, nss),\n ('clusters.channels', nss, nss),\n ('clusters.depths', nss, nss),\n ('clusters.probes', 0, 0),\n ('clusters.metrics', nss, nss),\n ('clusters.peakToTrough', nss, nss),\n ('clusters.uuids', nss, nss),\n ('clusters.waveforms', nss, nss),\n ('clusters.waveformsChannels', nss, nss),\n\n # ('ephysData.raw.ap', 2, 2),\n # ('ephysData.raw.lf', 2, 2),\n # ('ephysData.raw.ch', 4, 5),\n # ('ephysData.raw.meta', 4, 5),\n ('ephysData.raw.sync', 2, 2),\n ('ephysData.raw.timestamps', 2, 2),\n # ('ephysData.raw.wiring', 2, 3),\n\n ('probes.description', 1, 1),\n ('probes.trajectory', 1, 1),\n ('spikes.amps', nss, nss),\n ('spikes.clusters', nss, nss),\n ('spikes.depths', nss, nss),\n ('spikes.templates', nss, nss),\n ('spikes.times', nss, nss),\n ('templates.waveforms', nss, nss),\n ('templates.waveformsChannels', nss, nss),\n ('templates.amps', nss, nss),\n\n ('trials.choice', 1, 1),\n ('trials.contrastLeft', 1, 1),\n ('trials.contrastRight', 1, 1),\n ('trials.feedback_times', 1, 1),\n ('trials.feedbackType', 1, 1),\n ('trials.firstMovement_times', 1, 1),\n ('trials.goCue_times', 1, 1),\n ('trials.goCueTrigger_times', 1, 1),\n ('trials.intervals', 2, 2),\n ('trials.probabilityLeft', 1, 1),\n ('trials.response_times', 1, 1),\n ('trials.rewardVolume', 1, 1),\n ('trials.stimOff_times', 1, 1),\n ('trials.stimOn_times', 1, 1),\n ('wheel.position', 1, 1),\n ('wheel.timestamps', 1, 1),\n ('wheelMoves.intervals', 1, 1),\n ('wheelMoves.peakAmplitude', 1, 1),\n# Min is 0 because this session fails extraction properr extraction test in test_ephys_passive\n ('_ibl_passivePeriods.intervalsTable', 0, 1),\n ('_ibl_passiveRFM.times', 0, 1),\n ('_ibl_passiveGabor.table', 0, 1),\n ('_ibl_passiveStims.table', 0, 1),\n ]\n # check that we indeed find expected number of datasets after registration\n # for this we need to get the unique set of datasets\n dids = np.array([d['id'] for d in all_datasets])\n assert set(dids).issubset(set([ds['url'][-36:] for ds in dsets]))\n dtypes = sorted([ds['dataset_type'] for ds in dsets])\n success = True\n for ed in EXPECTED_DATASETS:\n count = sum([1 if ed[0] == dt else 0 for dt in dtypes])\n if not ed[1] <= count <= ed[2]:\n _logger.critical(f'missing dataset types: {ed[0]} found {count}, '\n f'expected between [{ed[1]} and {ed[2]}]')\n success = False\n else:\n _logger.info(f'check dataset types registration OK: {ed[0]}')\n self.assertTrue(success)\n # check that the task QC was successfully run\n session_dict = one.alyx.rest('sessions', 'read', id=eid)\n self.assertNotEqual('NOT_SET', session_dict['qc'], 'qc field not updated')\n extended = session_dict['extended_qc']\n self.assertTrue(any(k.startswith('_task_') for k in extended.keys()))\n # also check that the behaviour criteron was set\n assert 'behavior' in extended\n # check that the probes insertions have the json field labeled properly\n pis = one.alyx.rest('insertions', 'list', session=eid)\n for pi in pis:\n assert('n_units' in pi['json'])\n\n def check_spike_sorting_output(self, session_path):\n \"\"\" Check the spikes object \"\"\"\n spikes_attributes = ['depths', 'amps', 'clusters', 'times', 'templates', 'samples']\n probe_folders = list(set([p.parent for p in session_path.joinpath(\n 'alf').rglob('spikes.times.npy')]))\n for probe_folder in probe_folders:\n spikes = alf.io.load_object(probe_folder, 'spikes')\n self.assertTrue(np.max(spikes.times) > 1000)\n 
self.assertTrue(alf.io.check_dimensions(spikes) == 0)\n # check that it contains the proper keys\n self.assertTrue(set(spikes.keys()).issubset(set(spikes_attributes)))\n self.assertTrue(np.nanmin(spikes.depths) >= 0)\n self.assertTrue(np.nanmax(spikes.depths) <= 3840)\n self.assertTrue(80 < np.median(spikes.amps) * 1e6 < 200) # we expect Volts\n\n \"\"\"Check the clusters object\"\"\"\n clusters = alf.io.load_object(probe_folder, 'clusters')\n clusters_attributes = ['depths', 'channels', 'peakToTrough', 'amps',\n 'uuids', 'waveforms', 'waveformsChannels', 'metrics']\n self.assertTrue(np.unique([clusters[k].shape[0] for k in clusters]).size == 1)\n self.assertTrue(set(clusters_attributes) == set(clusters.keys()))\n self.assertTrue(10 < np.nanmedian(clusters.amps) * 1e6 < 80) # we expect Volts\n self.assertTrue(0 < np.median(np.abs(clusters.peakToTrough)) < 5) # we expect ms\n\n \"\"\"Check the channels object\"\"\"\n channels = alf.io.load_object(probe_folder, 'channels')\n channels_attributes = ['rawInd', 'localCoordinates']\n self.assertTrue(set(channels.keys()) == set(channels_attributes))\n\n \"\"\"Check the template object\"\"\"\n templates = alf.io.load_object(probe_folder, 'templates')\n templates_attributes = ['amps', 'waveforms', 'waveformsChannels']\n self.assertTrue(set(templates.keys()) == set(templates_attributes))\n self.assertTrue(np.unique([templates[k].shape[0] for k in templates]).size == 1)\n # \"\"\"Check the probes object\"\"\"\n probes_attributes = ['description', 'trajectory']\n probes = alf.io.load_object(session_path.joinpath('alf'), 'probes')\n self.assertTrue(set(probes.keys()) == set(probes_attributes))\n\n \"\"\"check sample waveforms and make sure amplitudes check out\"\"\"\n swv = alf.io.load_object(probe_folder, 'spikes_subset')\n swv_attributes = ['spikes', 'channels', 'waveforms']\n self.assertTrue(set(swv_attributes) == set(swv.keys()))\n iswv = 20001\n it = spikes.templates[swv.spikes[iswv]]\n _, ics, ict = np.intersect1d(swv.channels[iswv], templates.waveformsChannels[it],\n return_indices=True)\n iw = templates.waveforms[it][:, ict] != 0\n self.assertTrue(np.median(np.abs(swv.waveforms[iswv][:, ics][iw])) < 1e-3)\n self.assertTrue(np.median(np.abs(templates.waveforms[it][:, ict][iw])) < 1e-3)\n\n \"\"\"(basic) check cross-references\"\"\"\n nclusters = clusters.depths.size\n nchannels = channels.rawInd.size\n ntemplates = templates.waveforms.shape[0]\n self.assertTrue(np.all(0 <= spikes.clusters) and\n np.all(spikes.clusters <= (nclusters - 1)))\n self.assertTrue(np.all(0 <= spikes.templates) and\n np.all(spikes.templates <= (ntemplates - 1)))\n self.assertTrue(np.all(0 <= clusters.channels) and\n np.all(clusters.channels <= (nchannels - 1)))\n # check that the site positions channels match the depth with indexing\n self.assertTrue(np.all(clusters.depths == channels.localCoordinates[clusters.channels, 1]))\n\n \"\"\" compare against the cortexlab spikes Matlab code output if fixtures exist \"\"\"\n for famps in session_path.joinpath(\n 'raw_ephys_data', probe_folder.parts[-1]).rglob('expected_amps_V_matlab.npy'):\n expected_amps = np.load(famps)\n # the difference is within 2 uV\n assert np.max(np.abs((spikes.amps * 1e6 - np.squeeze(expected_amps)))) < 2\n _logger.info('checked ' + '/'.join(famps.parts[-2:]))\n\n for fdepths in session_path.joinpath(\n 'raw_ephys_data', probe_folder.parts[-1]).rglob('expected_dephts_um_matlab.npy'):\n expected_depths = np.load(fdepths)\n # the difference is within 2 uV\n assert np.nanmax(np.abs((spikes.depths - 
np.squeeze(expected_depths)))) < .01\n _logger.info('checked ' + '/'.join(fdepths.parts[-2:]))\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main(exit=False)\n"
] |
[
[
"numpy.nanmax",
"numpy.nanmedian",
"numpy.abs",
"numpy.unique",
"numpy.median",
"numpy.nanmin",
"numpy.squeeze",
"numpy.all",
"numpy.intersect1d",
"numpy.max",
"numpy.load",
"numpy.array"
]
] |
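The spike-sorting checks in the record above bounds-check every cross-referencing index array (spikes.clusters into clusters, clusters.channels into channels) and verify that cluster depths match the channel site positions they index. A minimal, self-contained sketch of that validation pattern — the arrays here are synthetic stand-ins for the ALF objects loaded by the test:

import numpy as np

# Synthetic stand-ins for the ALF objects (spikes, clusters, channels).
rng = np.random.default_rng(0)
n_spikes, n_clusters, n_channels = 1000, 40, 384
spikes_clusters = rng.integers(0, n_clusters, n_spikes)
clusters_channels = rng.integers(0, n_channels, n_clusters)
# localCoordinates holds (x, y) site positions; column 1 is depth in um.
channels_local_coordinates = np.stack(
    [np.zeros(n_channels), np.arange(n_channels) * 10.0], axis=1)
clusters_depths = channels_local_coordinates[clusters_channels, 1]

# Index arrays must stay within the bounds of the object they reference.
assert np.all((0 <= spikes_clusters) & (spikes_clusters <= n_clusters - 1))
assert np.all((0 <= clusters_channels) & (clusters_channels <= n_channels - 1))
# Depths must agree with the site positions indexed through clusters.channels.
assert np.all(clusters_depths == channels_local_coordinates[clusters_channels, 1])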
kemaleren/PaCMAP
|
[
"a171106bbafeff62cb27df898c8777ae2f67dbfe"
] |
[
"source_code/pacmap/pacmap.py"
] |
[
"import numpy as np\nfrom sklearn.base import BaseEstimator\nimport numba\nfrom annoy import AnnoyIndex\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nimport time\nimport math\nimport datetime\nimport warnings\n\nglobal _RANDOM_STATE\n_RANDOM_STATE = None\n\n\[email protected](\"f4(f4[:])\")\ndef l2_norm(x):\n \"\"\"\n L2 norm of a vector.\n \"\"\"\n result = 0.0\n for i in range(x.shape[0]):\n result += x[i] ** 2\n return np.sqrt(result)\n\n\[email protected](\"f4(f4[:],f4[:])\")\ndef euclid_dist(x1, x2):\n \"\"\"\n Euclidean distance between two vectors.\n \"\"\"\n result = 0.0\n for i in range(x1.shape[0]):\n result += (x1[i] - x2[i]) ** 2\n return np.sqrt(result)\n\n\[email protected](\"f4(f4[:],f4[:])\")\ndef manhattan_dist(x1, x2):\n \"\"\"\n Manhattan distance between two vectors.\n \"\"\"\n result = 0.0\n for i in range(x1.shape[0]):\n result += np.abs(x1[i] - x2[i])\n return result\n\n\[email protected](\"f4(f4[:],f4[:])\")\ndef angular_dist(x1, x2):\n \"\"\"\n Angular (i.e. cosine) distance between two vectors.\n \"\"\"\n x1_norm = np.maximum(l2_norm(x1), 1e-20)\n x2_norm = np.maximum(l2_norm(x2), 1e-20)\n result = 0.0\n for i in range(x1.shape[0]):\n result += x1[i] * x2[i]\n return np.sqrt(2.0 - 2.0 * result / x1_norm / x2_norm)\n\n\[email protected](\"f4(f4[:],f4[:])\")\ndef hamming_dist(x1, x2):\n \"\"\"\n Hamming distance between two vectors.\n \"\"\"\n result = 0.0\n for i in range(x1.shape[0]):\n if x1[i] != x2[i]:\n result += 1.0\n return result\n\n\[email protected]()\ndef calculate_dist(x1, x2, distance_index):\n if distance_index == 0: # euclidean\n return euclid_dist(x1, x2)\n elif distance_index == 1: # manhattan\n return manhattan_dist(x1, x2)\n elif distance_index == 2: # angular\n return angular_dist(x1, x2)\n elif distance_index == 3: # hamming\n return hamming_dist(x1, x2)\n\n\[email protected](\"i4[:](i4,i4,i4[:])\", nogil=True)\ndef sample_FP(n_samples, maximum, reject_ind):\n result = np.empty(n_samples, dtype=np.int32)\n for i in range(n_samples):\n reject_sample = True\n while reject_sample:\n j = np.random.randint(maximum)\n for k in range(i):\n if j == result[k]:\n break\n for k in range(reject_ind.shape[0]):\n if j == reject_ind[k]:\n break\n else:\n reject_sample = False\n result[i] = j\n return result\n\n\[email protected](\"i4[:,:](f4[:,:],f4[:,:],i4[:,:],i4)\", parallel=True, nogil=True)\ndef sample_neighbors_pair(X, scaled_dist, nbrs, n_neighbors):\n n = X.shape[0]\n pair_neighbors = np.empty((n*n_neighbors, 2), dtype=np.int32)\n\n for i in numba.prange(n):\n scaled_sort = np.argsort(scaled_dist[i])\n for j in numba.prange(n_neighbors):\n pair_neighbors[i*n_neighbors + j][0] = i\n pair_neighbors[i*n_neighbors + j][1] = nbrs[i][scaled_sort[j]]\n return pair_neighbors\n\n\[email protected](\"i4[:,:](f4[:,:],i4,i4)\", nogil=True)\ndef sample_MN_pair(X, n_MN, option=0):\n n = X.shape[0]\n pair_MN = np.empty((n*n_MN, 2), dtype=np.int32)\n for i in numba.prange(n):\n for jj in range(n_MN):\n sampled = np.random.randint(0, n, 6)\n dist_list = np.empty((6), dtype=np.float32)\n for t in range(sampled.shape[0]):\n dist_list[t] = calculate_dist(X[i], X[sampled[t]], distance_index=option)\n min_dic = np.argmin(dist_list)\n dist_list = np.delete(dist_list, [min_dic])\n sampled = np.delete(sampled, [min_dic])\n picked = sampled[np.argmin(dist_list)]\n pair_MN[i*n_MN + jj][0] = i\n pair_MN[i*n_MN + jj][1] = picked\n return pair_MN\n\n\[email protected](\"i4[:,:](f4[:,:],i4,i4,i4)\", 
nogil=True)\ndef sample_MN_pair_deterministic(X, n_MN, random_state, option=0):\n n = X.shape[0]\n pair_MN = np.empty((n*n_MN, 2), dtype=np.int32)\n for i in numba.prange(n):\n for jj in range(n_MN):\n # Shifting the seed to prevent sampling the same pairs\n np.random.seed(random_state + i * n_MN + jj) \n sampled = np.random.randint(0, n, 6)\n dist_list = np.empty((6), dtype=np.float32)\n for t in range(sampled.shape[0]):\n dist_list[t] = calculate_dist(X[i], X[sampled[t]], distance_index=option)\n min_dic = np.argmin(dist_list)\n dist_list = np.delete(dist_list, [min_dic])\n sampled = np.delete(sampled, [min_dic])\n picked = sampled[np.argmin(dist_list)]\n pair_MN[i*n_MN + jj][0] = i\n pair_MN[i*n_MN + jj][1] = picked\n return pair_MN\n\n\[email protected](\"i4[:,:](f4[:,:],i4[:,:],i4,i4)\", parallel=True, nogil=True)\ndef sample_FP_pair(X, pair_neighbors, n_neighbors, n_FP):\n n = X.shape[0]\n pair_FP = np.empty((n * n_FP, 2), dtype=np.int32)\n for i in numba.prange(n):\n for k in numba.prange(n_FP):\n FP_index = sample_FP(\n n_FP, n, pair_neighbors[i*n_neighbors: i*n_neighbors + n_neighbors][1])\n pair_FP[i*n_FP + k][0] = i\n pair_FP[i*n_FP + k][1] = FP_index[k]\n return pair_FP\n\n\[email protected](\"i4[:,:](f4[:,:],i4[:,:],i4,i4,i4)\", parallel=True, nogil=True)\ndef sample_FP_pair_deterministic(X, pair_neighbors, n_neighbors, n_FP, random_state):\n n = X.shape[0]\n pair_FP = np.empty((n * n_FP, 2), dtype=np.int32)\n for i in numba.prange(n):\n for k in numba.prange(n_FP):\n np.random.seed(random_state+i*n_FP+k)\n FP_index = sample_FP(\n n_FP, n, pair_neighbors[i*n_neighbors: i*n_neighbors + n_neighbors][1])\n pair_FP[i*n_FP + k][0] = i\n pair_FP[i*n_FP + k][1] = FP_index[k]\n return pair_FP\n\n\[email protected](\"f4[:,:](f4[:,:],f4[:],i4[:,:])\", parallel=True, nogil=True)\ndef scale_dist(knn_distance, sig, nbrs):\n n, num_neighbors = knn_distance.shape\n scaled_dist = np.zeros((n, num_neighbors), dtype=np.float32)\n for i in numba.prange(n):\n for j in numba.prange(num_neighbors):\n scaled_dist[i, j] = knn_distance[i, j] ** 2 / \\\n sig[i] / sig[nbrs[i, j]]\n return scaled_dist\n\n\[email protected](\"void(f4[:,:],f4[:,:],f4[:,:],f4[:,:],f4,f4,f4,i4)\", parallel=True, nogil=True)\ndef update_embedding_adam(Y, grad, m, v, beta1, beta2, lr, itr):\n n, dim = Y.shape\n lr_t = lr * math.sqrt(1.0 - beta2**(itr+1)) / (1.0 - beta1**(itr+1))\n for i in numba.prange(n):\n for d in numba.prange(dim):\n m[i][d] += (1 - beta1) * (grad[i][d] - m[i][d])\n v[i][d] += (1 - beta2) * (grad[i][d]**2 - v[i][d])\n Y[i][d] -= lr_t * m[i][d]/(math.sqrt(v[i][d]) + 1e-7)\n\n\[email protected](\"f4[:,:](f4[:,:],i4[:,:],i4[:,:],i4[:,:],f4,f4,f4)\", parallel=True, nogil=True)\ndef pacmap_grad(Y, pair_neighbors, pair_MN, pair_FP, w_neighbors, w_MN, w_FP):\n n, dim = Y.shape\n grad = np.zeros((n+1, dim), dtype=np.float32)\n y_ij = np.empty(dim, dtype=np.float32)\n loss = np.zeros(3, dtype=np.float32)\n for t in range(pair_neighbors.shape[0]):\n i = pair_neighbors[t, 0]\n j = pair_neighbors[t, 1]\n d_ij = 1.0\n for d in range(dim):\n y_ij[d] = Y[i, d] - Y[j, d]\n d_ij += y_ij[d] ** 2\n loss[0] += w_neighbors * (d_ij/(10. + d_ij))\n w1 = w_neighbors * (20./(10. + d_ij) ** 2)\n for d in range(dim):\n grad[i, d] += w1 * y_ij[d]\n grad[j, d] -= w1 * y_ij[d]\n for tt in range(pair_MN.shape[0]):\n i = pair_MN[tt, 0]\n j = pair_MN[tt, 1]\n d_ij = 1.0\n for d in range(dim):\n y_ij[d] = Y[i][d] - Y[j][d]\n d_ij += y_ij[d] ** 2\n loss[1] += w_MN * d_ij/(10000. + d_ij)\n w = w_MN * 20000./(10000. 
+ d_ij) ** 2\n for d in range(dim):\n grad[i, d] += w * y_ij[d]\n grad[j, d] -= w * y_ij[d]\n for ttt in range(pair_FP.shape[0]):\n i = pair_FP[ttt, 0]\n j = pair_FP[ttt, 1]\n d_ij = 1.0\n for d in range(dim):\n y_ij[d] = Y[i, d] - Y[j, d]\n d_ij += y_ij[d] ** 2\n loss[2] += w_FP * 1./(1. + d_ij)\n w1 = w_FP * 2./(1. + d_ij) ** 2\n for d in range(dim):\n grad[i, d] -= w1 * y_ij[d]\n grad[j, d] += w1 * y_ij[d]\n grad[-1, 0] = loss.sum()\n return grad\n\n\ndef distance_to_option(distance='euclidean'):\n if distance == 'euclidean':\n option = 0\n elif distance == 'manhattan':\n option = 1\n elif distance == 'angular':\n option = 2\n elif distance == 'hamming':\n option = 3\n else:\n raise NotImplementedError('Distance other than euclidean, manhattan,' + \\\n 'angular or hamming is not supported')\n return option\n\n\ndef generate_pair(\n X,\n n_neighbors,\n n_MN,\n n_FP,\n distance='euclidean',\n verbose=True\n):\n n, dim = X.shape\n n_neighbors_extra = min(n_neighbors + 50, n)\n tree = AnnoyIndex(dim, metric=distance)\n if _RANDOM_STATE is not None:\n tree.set_seed(_RANDOM_STATE)\n for i in range(n):\n tree.add_item(i, X[i, :])\n tree.build(20)\n\n option = distance_to_option(distance=distance)\n\n nbrs = np.zeros((n, n_neighbors_extra), dtype=np.int32)\n knn_distances = np.empty((n, n_neighbors_extra), dtype=np.float32)\n\n for i in range(n):\n nbrs_ = tree.get_nns_by_item(i, n_neighbors_extra+1)\n nbrs[i, :] = nbrs_[1:]\n for j in range(n_neighbors_extra):\n knn_distances[i, j] = tree.get_distance(i, nbrs[i, j])\n if verbose:\n print(\"Found nearest neighbor\")\n sig = np.maximum(np.mean(knn_distances[:, 3:6], axis=1), 1e-10)\n if verbose:\n print(\"Calculated sigma\")\n scaled_dist = scale_dist(knn_distances, sig, nbrs)\n if verbose:\n print(\"Found scaled dist\")\n pair_neighbors = sample_neighbors_pair(X, scaled_dist, nbrs, n_neighbors)\n if _RANDOM_STATE is None:\n pair_MN = sample_MN_pair(X, n_MN, option)\n pair_FP = sample_FP_pair(X, pair_neighbors, n_neighbors, n_FP)\n else:\n pair_MN = sample_MN_pair_deterministic(X, n_MN, _RANDOM_STATE, option)\n pair_FP = sample_FP_pair_deterministic(X, pair_neighbors, n_neighbors, n_FP, _RANDOM_STATE)\n return pair_neighbors, pair_MN, pair_FP\n\n\ndef generate_pair_no_neighbors(\n X,\n n_neighbors,\n n_MN,\n n_FP,\n pair_neighbors,\n distance='euclidean',\n verbose=True\n):\n option = distance_to_option(distance=distance)\n\n if _RANDOM_STATE is None:\n pair_MN = sample_MN_pair(X, n_MN, option)\n pair_FP = sample_FP_pair(X, pair_neighbors, n_neighbors, n_FP)\n else:\n if verbose:\n print(\"Triggered\")\n pair_MN = sample_MN_pair_deterministic(X, n_MN, _RANDOM_STATE, option)\n pair_FP = sample_FP_pair_deterministic(X, pair_neighbors, n_neighbors, n_FP, _RANDOM_STATE)\n return pair_neighbors, pair_MN, pair_FP\n\n\ndef pacmap(\n X,\n n_dims,\n n_neighbors,\n n_MN,\n n_FP,\n pair_neighbors,\n pair_MN,\n pair_FP,\n distance,\n lr,\n num_iters,\n Yinit,\n apply_pca,\n verbose,\n intermediate,\n seed=0\n):\n start_time = time.time()\n n, high_dim = X.shape\n\n if intermediate:\n itr_dic = [0, 10, 30, 60, 100, 120, 140, 170, 200, 250, 300, 350, 450]\n intermediate_states = np.empty((13, n, 2), dtype=np.float32)\n else:\n intermediate_states = None\n\n pca_solution = False\n if pair_neighbors is None:\n if verbose:\n print(\"Finding pairs\")\n if distance != \"hamming\":\n if high_dim > 100 and apply_pca:\n X -= np.mean(X, axis=0)\n X = TruncatedSVD(n_components=100,\n random_state=seed).fit_transform(X)\n pca_solution = True\n if verbose:\n 
print(\"Applied PCA, the dimensionality becomes 100\")\n else:\n X -= np.min(X)\n X /= np.max(X)\n X -= np.mean(X, axis=0)\n if verbose:\n print(\"X is normalized\")\n pair_neighbors, pair_MN, pair_FP = generate_pair(\n X, n_neighbors, n_MN, n_FP, distance, verbose\n )\n if verbose:\n print(\"Pairs sampled successfully.\")\n elif pair_MN is None and pair_FP is None:\n if verbose:\n print(\"Using user provided nearest neighbor pairs.\")\n try:\n assert(pair_neighbors.shape == (n * n_neighbors, 2))\n except AssertionError:\n print(\"The shape of the user provided nearest neighbor pairs is incorrect.\")\n raise ValueError\n pair_neighbors, pair_MN, pair_FP = generate_pair_no_neighbors(\n X, n_neighbors, n_MN, n_FP, pair_neighbors, distance, verbose\n )\n if verbose:\n print(\"Pairs sampled successfully.\")\n else:\n if verbose:\n print(\"Using stored pairs.\")\n\n if Yinit is None or Yinit == \"pca\":\n if pca_solution:\n Y = 0.01 * X[:, :n_dims]\n else:\n Y = 0.01 * \\\n PCA(n_components=n_dims, random_state=_RANDOM_STATE).fit_transform(X).astype(np.float32)\n elif Yinit == \"random\":\n if _RANDOM_STATE is not None:\n np.random.seed(_RANDOM_STATE)\n Y = np.random.normal(size=[n, n_dims]).astype(np.float32) * 0.0001\n else: # user_supplied matrix\n Yinit = Yinit.astype(np.float32)\n scaler = preprocessing.StandardScaler().fit(Yinit)\n Y = scaler.transform(Yinit) * 0.0001\n\n w_MN_init = 1000.\n beta1 = 0.9\n beta2 = 0.999\n m = np.zeros_like(Y, dtype=np.float32)\n v = np.zeros_like(Y, dtype=np.float32)\n\n if intermediate:\n itr_ind = 1\n intermediate_states[0, :, :] = Y\n\n for itr in range(num_iters):\n if itr < 100:\n w_MN = (1 - itr/100) * w_MN_init + itr/100 * 3.0\n w_neighbors = 2.0\n w_FP = 1.0\n elif itr < 200:\n w_MN = 3.0\n w_neighbors = 3\n w_FP = 1\n else:\n w_MN = 0.0\n w_neighbors = 1.\n w_FP = 1.\n\n grad = pacmap_grad(Y, pair_neighbors, pair_MN,\n pair_FP, w_neighbors, w_MN, w_FP)\n C = grad[-1, 0]\n if verbose and itr == 0:\n print(f\"Initial Loss: {C}\")\n update_embedding_adam(Y, grad, m, v, beta1, beta2, lr, itr)\n\n if intermediate:\n if (itr+1) == itr_dic[itr_ind]:\n intermediate_states[itr_ind, :, :] = Y\n itr_ind += 1\n if itr_ind > 12:\n itr_ind -= 1\n if verbose:\n if (itr + 1) % 10 == 0:\n print(\"Iteration: %4d, Loss: %f\" % (itr + 1, C))\n\n if verbose:\n elapsed = str(datetime.timedelta(seconds=time.time() - start_time))\n print(\"Elapsed time: %s\" % (elapsed))\n return Y, intermediate_states, pair_neighbors, pair_MN, pair_FP\n\n\nclass PaCMAP(BaseEstimator):\n def __init__(self,\n n_dims=2,\n n_neighbors=10,\n MN_ratio=0.5,\n FP_ratio=2.0,\n pair_neighbors=None,\n pair_MN=None,\n pair_FP=None,\n distance=\"euclidean\",\n lr=1.0,\n num_iters=450,\n verbose=False,\n apply_pca=True,\n intermediate=False,\n random_state=None\n ):\n self.n_dims = n_dims\n self.n_neighbors = n_neighbors\n self.MN_ratio = MN_ratio\n self.FP_ratio = FP_ratio\n self.pair_neighbors = pair_neighbors\n self.pair_MN = pair_MN\n self.pair_FP = pair_FP\n self.distance = distance\n self.lr = lr\n self.num_iters = num_iters\n self.apply_pca = apply_pca\n self.verbose = verbose\n self.intermediate = intermediate\n global _RANDOM_STATE\n if random_state is not None:\n assert(isinstance(random_state, int))\n self.random_state = random_state\n _RANDOM_STATE = random_state # Set random state for numba functions\n if verbose:\n print(f'Warning: random state is set to {_RANDOM_STATE}')\n else:\n self.random_state = 0\n _RANDOM_STATE = None # Reset random state\n if verbose:\n print(f'Warning: random 
state is removed')\n\n\n if self.n_dims < 2:\n raise ValueError(\n \"The number of projection dimensions must be at least 2\")\n if self.lr <= 0:\n raise ValueError(\"The learning rate must be larger than 0\")\n if self.distance == \"hamming\" and apply_pca:\n warnings.warn(\"apply_pca = True for Hamming distance.\")\n if not self.apply_pca:\n print(\n \"Warning: running ANNOY Indexing on high-dimensional data. Nearest-neighbor search may be slow!\")\n\n def fit(self, X, init=None, save_pairs=True):\n X = X.astype(np.float32)\n n, dim = X.shape\n if n <= 0:\n raise ValueError(\"The sample size must be larger than 0\")\n if self.n_neighbors is None:\n if n <= 10000:\n self.n_neighbors = 10\n else:\n self.n_neighbors = int(round(10 + 15 * (np.log10(n) - 4)))\n self.n_MN = int(round(self.n_neighbors * self.MN_ratio))\n self.n_FP = int(round(self.n_neighbors * self.FP_ratio))\n if self.n_neighbors < 1:\n raise ValueError(\n \"The number of nearest neighbors can't be less than 1\")\n if self.n_FP < 1:\n raise ValueError(\n \"The number of further points can't be less than 1\")\n if self.verbose:\n print(\n \"PaCMAP(n_neighbors={}, n_MN={}, n_FP={}, distance={}, \"\n \"lr={}, n_iters={}, apply_pca={}, opt_method='adam', \"\n \"verbose={}, intermediate={}, seed={})\".format(\n self.n_neighbors,\n self.n_MN,\n self.n_FP,\n self.distance,\n self.lr,\n self.num_iters,\n self.apply_pca,\n self.verbose,\n self.intermediate,\n _RANDOM_STATE\n )\n )\n if save_pairs:\n self.embedding_, self.intermediate_states, self.pair_neighbors, self.pair_MN, self.pair_FP = pacmap(\n X,\n self.n_dims,\n self.n_neighbors,\n self.n_MN,\n self.n_FP,\n self.pair_neighbors,\n self.pair_MN,\n self.pair_FP,\n self.distance,\n self.lr,\n self.num_iters,\n init,\n self.apply_pca,\n self.verbose,\n self.intermediate,\n self.random_state\n )\n else:\n self.embedding_, self.intermediate_states, _, _, _ = pacmap(\n X,\n self.n_dims,\n self.n_neighbors,\n self.n_MN,\n self.n_FP,\n self.pair_neighbors,\n self.pair_MN,\n self.pair_FP,\n self.distance,\n self.lr,\n self.num_iters,\n init,\n self.apply_pca,\n self.verbose,\n self.intermediate,\n self.random_state\n )\n\n return self\n\n def fit_transform(self, X, init=None, save_pairs=True):\n self.fit(X, init, save_pairs)\n if self.intermediate:\n return self.intermediate_states\n else:\n return self.embedding_\n\n def sample_pairs(self, X):\n if self.verbose:\n print(\"sampling pairs\")\n X = X.astype(np.float32)\n n, dim = X.shape\n if n <= 0:\n raise ValueError(\"The sample size must be larger than 0\")\n if self.n_neighbors is None:\n if n <= 10000:\n self.n_neighbors = 10\n else:\n self.n_neighbors = int(round(10 + 15 * (np.log10(n) - 4)))\n self.n_MN = int(round(self.n_neighbors * self.MN_ratio))\n self.n_FP = int(round(self.n_neighbors * self.FP_ratio))\n if self.n_neighbors < 1:\n raise ValueError(\n \"The number of nearest neighbors can't be less than 1\")\n if self.n_FP < 1:\n raise ValueError(\n \"The number of further points can't be less than 1\")\n if self.distance != \"hamming\":\n if X.shape[1] > 100 and self.apply_pca:\n X -= np.mean(X, axis=0)\n X = TruncatedSVD(n_components=100,\n random_state=self.random_state).fit_transform(X)\n if self.verbose:\n print(\"applied PCA\")\n else:\n X -= np.min(X)\n X /= np.max(X)\n X -= np.mean(X, axis=0)\n self.pair_neighbors, self.pair_MN, self.pair_FP = generate_pair(\n X,\n self.n_neighbors,\n self.n_MN,\n self.n_FP,\n self.distance,\n self.verbose\n )\n if self.verbose:\n print(\"sampled pairs\")\n\n return self\n\n def 
del_pairs(self):\n self.pair_neighbors = None,\n self.pair_MN = None,\n self.pair_FP = None,\n return self\n"
] |
[
[
"sklearn.decomposition.TruncatedSVD",
"numpy.abs",
"numpy.sqrt",
"numpy.random.seed",
"numpy.min",
"numpy.max",
"numpy.delete",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.mean",
"numpy.argmin",
"numpy.log10",
"numpy.argsort",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"sklearn.decomposition.PCA",
"numpy.empty",
"numpy.random.randint"
]
] |
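The PaCMAP class in this record follows the scikit-learn estimator pattern (it subclasses BaseEstimator and exposes fit/fit_transform). A short usage sketch on random data — the import path assumes the package from this record is installed as pacmap, and the data and parameter values are illustrative:

import numpy as np
from pacmap import PaCMAP  # assumes the module above is importable as 'pacmap'

X = np.random.normal(size=(1000, 50)).astype(np.float32)

# Parameters mirror the __init__ defaults shown above; an integer
# random_state makes the numba samplers deterministic through the
# module-level _RANDOM_STATE.
reducer = PaCMAP(n_dims=2, n_neighbors=10, MN_ratio=0.5, FP_ratio=2.0,
                 random_state=42)
Y = reducer.fit_transform(X, init="pca")  # PCA initialisation, per the code above
print(Y.shape)  # (1000, 2)

Setting intermediate=True instead returns the stack of embeddings snapshotted at the iterations listed in itr_dic, which is useful for visualising the optimisation trajectory.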
BenevolentAI/RELVM
|
[
"468da9eb78714932612e54454bb06c4beac1bd1d"
] |
[
"run/unsup/__init__.py"
] |
[
"import os\nimport time\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Run(object):\n \"\"\"\n This Run class handles initialising the trainer, loading data, creating the Tensorflow dataset and model training\n for the unsupervised representation model.\n\n Parameters\n ----------\n data_dir : str\n The directory containing the data. It should contain the following 3 memory-mapped Numpy arrays:\n entities_x.mmap: the entity types for the first entity in each sentence.\n entities_y.mmap: the entity types for the second entity in each sentence.\n contexts.mmap: the contexts in which each entity pair occurs.\n vocab : list[str]\n The list of valid tokens.\n entity_types : list[str]\n The list of valid entity types (effectively the vocabulary for the entities).\n num_data : int\n The number of data points in the above memory-mapped Numpy arrays.\n max_len : int\n The maximum length of the contexts.\n trainer : trainers.unsup.Trainer\n The trainer responsible for training the model.\n trainer_kwargs : dict\n Any keyword arguments with which to initialise the trainer.\n out_dir : str\n The directory where the outputs will be stored.\n pre_trained : bool\n Whether or not the model has already been trained.\n pre_trained_dir : str or None\n The directory from which to load pre-trained parameter values.\n \"\"\"\n\n def __init__(self, data_dir, vocab, entity_types, num_data, max_len, trainer, trainer_kwargs, out_dir,\n pre_trained=False, pre_trained_dir=None):\n\n self.vocab = vocab\n self.entity_types = entity_types\n\n self.num_data = num_data\n self.max_len = max_len\n\n self.strategy = tf.distribute.MirroredStrategy()\n\n with self.strategy.scope():\n self.trainer = trainer(strategy=self.strategy, **trainer_kwargs)\n\n self.entities_x, self.entities_y, self.contexts = self.load_data(data_dir)\n\n self.out_dir = out_dir\n\n self.pre_trained = pre_trained\n self.pre_trained_dir = pre_trained_dir\n\n trainable_variables = {v.name: v for v in self.trainer.trainable_variables}\n self.checkpoint = tf.train.Checkpoint(optimiser=self.trainer.optimiser, **trainable_variables)\n\n if self.pre_trained:\n self.checkpoint.restore(os.path.join(self.pre_trained_dir, 'saved_model'))\n\n def load_data(self, data_dir):\n \"\"\"\n Load the required data into Numpy arrays.\n\n Parameters\n ----------\n data_dir : str\n The directory containing the data. 
It should contain the following 3 memory-mapped Numpy arrays:\n entities_x.mmap: the entity types for the first entity in each sentence.\n entities_y.mmap: the entity types for the second entity in each sentence.\n contexts.mmap: the contexts in which each entity pair occurs\n\n Returns\n -------\n entities_x : np.memmap\n The indices to `self.entity_types` for the first entity in each sentence.\n entities_y : np.memmap\n The indices to `self.entity_types` for the second entity in each sentence.\n contexts : np.memmap\n The indices to `self.vocab` for the contexts in which each entity pair occurs.\n \"\"\"\n\n entities_x = np.memmap(os.path.join(data_dir, 'entities_x.mmap'), dtype=np.uint16, mode='r',\n shape=(self.num_data,))\n entities_y = np.memmap(os.path.join(data_dir, 'entities_y.mmap'), dtype=np.uint16, mode='r',\n shape=(self.num_data,))\n contexts = np.memmap(os.path.join(data_dir, 'contexts.mmap'), dtype=np.uint16, mode='r',\n shape=(self.num_data, self.max_len))\n\n return entities_x, entities_y, contexts\n\n def create_dataset(self, n_batch, n_iter_warm_up):\n \"\"\"\n Create the Tensorflow dataset to feed to the trainer.\n\n Parameters\n ----------\n n_batch : int\n The batch size.\n n_iter_warm_up : int\n The number of iterations over which the KL divergence will be annealed from 0 to 1.\n\n Returns\n -------\n tf.data.Dataset\n \"\"\"\n\n def gen():\n\n i = 1\n\n while True:\n\n inds = np.random.choice(self.num_data, size=(n_batch,))\n\n x = np.int32(self.entities_x[inds])\n y = np.int32(self.entities_y[inds])\n c = np.int32(self.contexts[inds])\n\n if n_iter_warm_up is not None:\n beta = np.float32(np.minimum(1., i / n_iter_warm_up))\n else:\n beta = np.float32(1.)\n\n i += 1\n\n yield x, y, c, np.tile(beta, (n_batch,))\n\n return tf.data.Dataset.from_generator(gen, output_types=(tf.int32, tf.int32, tf.int32, tf.float32),\n output_shapes=([None], [None], [None, self.max_len], [None])\n )\n\n def train(self, n_iter, n_batch, n_samples, n_iter_warm_up, save_freq=100000):\n \"\"\"\n Train the model, saving the parameters at a fixed frequency using Tensorflow checkpoints.\n\n Parameters\n ----------\n n_iter : int\n The number of iterations for which to train the model.\n n_batch : int\n The training batch size.\n n_samples : int\n The number of latent samples to draw during training.\n n_iter_warm_up : None or int\n The number of iterations over which to anneal the KL divergence term of the ELBO.\n save_freq : int\n The frequency (in iterations) at which to save the parameter values.\n\n Returns\n -------\n elbo_kl : array\n The array of (elbo,kl) values for all iterations (for testing)\n \"\"\"\n\n dataset_train = self.create_dataset(n_batch, n_iter_warm_up)\n dist_dataset_train = self.strategy.experimental_distribute_dataset(dataset_train)\n\n i = 1\n\n elbo_kl = np.zeros((n_iter, 2))\n\n with self.strategy.scope():\n for inputs in dist_dataset_train:\n start = time.perf_counter()\n\n elbo, kl = self.trainer.optimise(inputs, n_batch, n_samples)\n\n elbo_kl[i-1, 0] = elbo.numpy()\n elbo_kl[i-1, 1] = kl.numpy()\n\n print(\n 'Iteration ' + str(i) + ': objective = ' + str(elbo.numpy()) + ' kl = ' + str(kl.numpy()) +\n ' (time taken = ' + str(time.perf_counter() - start) + ' seconds)')\n\n if i % save_freq == 0:\n self.checkpoint.write(os.path.join(self.out_dir, 'saved_model'))\n\n if i >= n_iter:\n break\n\n i += 1\n\n self.checkpoint.write(os.path.join(self.out_dir, 'saved_model'))\n\n return elbo_kl\n"
] |
[
[
"numpy.minimum",
"numpy.random.choice",
"tensorflow.train.Checkpoint",
"numpy.int32",
"numpy.tile",
"tensorflow.data.Dataset.from_generator",
"numpy.float32",
"numpy.zeros",
"tensorflow.distribute.MirroredStrategy"
]
] |
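The Run class above streams training batches through tf.data.Dataset.from_generator and anneals the KL weight beta linearly from 0 to 1 over n_iter_warm_up iterations. A stripped-down sketch of that warm-up schedule, with the memory-mapped inputs replaced by an in-memory stand-in:

import numpy as np
import tensorflow as tf

num_data, max_len, n_batch, n_iter_warm_up = 1000, 32, 16, 100
contexts = np.zeros((num_data, max_len), dtype=np.uint16)  # stand-in data

def gen():
    i = 1
    while True:
        inds = np.random.choice(num_data, size=(n_batch,))
        c = np.int32(contexts[inds])
        # Linear KL warm-up: beta ramps from ~0 to 1 by iteration n_iter_warm_up.
        beta = np.float32(np.minimum(1.0, i / n_iter_warm_up))
        i += 1
        yield c, np.tile(beta, (n_batch,))

dataset = tf.data.Dataset.from_generator(
    gen,
    output_types=(tf.int32, tf.float32),
    output_shapes=([None, max_len], [None]))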
dyollb/MONAI
|
[
"9084c452c48095c82c71d4391b3684006e5a3c56",
"9084c452c48095c82c71d4391b3684006e5a3c56"
] |
[
"monai/transforms/utils_create_transform_ims.py",
"tests/test_vote_ensembled.py"
] |
[
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport pathlib\nimport tempfile\nimport textwrap\nfrom copy import deepcopy\nfrom glob import glob\nfrom typing import TYPE_CHECKING, Callable\n\nimport numpy as np\nimport torch\n\nfrom monai.apps import download_and_extract\nfrom monai.transforms import (\n AddChanneld,\n Affine,\n Affined,\n AsDiscrete,\n Compose,\n Flip,\n Flipd,\n LoadImaged,\n MapTransform,\n Orientation,\n Orientationd,\n Rand3DElastic,\n Rand3DElasticd,\n RandFlip,\n RandFlipd,\n Randomizable,\n RandRotate,\n RandRotated,\n RandZoom,\n RandZoomd,\n Rotate,\n Rotate90,\n Rotate90d,\n Rotated,\n ScaleIntensity,\n ScaleIntensityd,\n SpatialPadd,\n Zoom,\n Zoomd,\n)\nfrom monai.transforms.croppad.array import (\n BorderPad,\n CenterScaleCrop,\n CenterSpatialCrop,\n CropForeground,\n DivisiblePad,\n RandCropByLabelClasses,\n RandCropByPosNegLabel,\n RandScaleCrop,\n RandSpatialCrop,\n RandSpatialCropSamples,\n RandWeightedCrop,\n ResizeWithPadOrCrop,\n SpatialCrop,\n SpatialPad,\n)\nfrom monai.transforms.croppad.dictionary import (\n BorderPadd,\n CenterScaleCropd,\n CenterSpatialCropd,\n CropForegroundd,\n DivisiblePadd,\n RandCropByLabelClassesd,\n RandCropByPosNegLabeld,\n RandScaleCropd,\n RandSpatialCropd,\n RandSpatialCropSamplesd,\n RandWeightedCropd,\n ResizeWithPadOrCropd,\n SpatialCropd,\n)\nfrom monai.transforms.intensity.array import (\n AdjustContrast,\n GaussianSharpen,\n GaussianSmooth,\n GibbsNoise,\n HistogramNormalize,\n KSpaceSpikeNoise,\n MaskIntensity,\n NormalizeIntensity,\n RandAdjustContrast,\n RandBiasField,\n RandCoarseDropout,\n RandCoarseShuffle,\n RandGaussianNoise,\n RandGaussianSharpen,\n RandGaussianSmooth,\n RandGibbsNoise,\n RandHistogramShift,\n RandKSpaceSpikeNoise,\n RandScaleIntensity,\n RandShiftIntensity,\n RandStdShiftIntensity,\n ScaleIntensityRange,\n ScaleIntensityRangePercentiles,\n ShiftIntensity,\n StdShiftIntensity,\n ThresholdIntensity,\n)\nfrom monai.transforms.intensity.dictionary import (\n AdjustContrastd,\n GaussianSharpend,\n GaussianSmoothd,\n GibbsNoised,\n HistogramNormalized,\n KSpaceSpikeNoised,\n MaskIntensityd,\n NormalizeIntensityd,\n RandAdjustContrastd,\n RandBiasFieldd,\n RandCoarseDropoutd,\n RandCoarseShuffled,\n RandGaussianNoised,\n RandGaussianSharpend,\n RandGaussianSmoothd,\n RandGibbsNoised,\n RandHistogramShiftd,\n RandKSpaceSpikeNoised,\n RandScaleIntensityd,\n RandShiftIntensityd,\n RandStdShiftIntensityd,\n ScaleIntensityRanged,\n ScaleIntensityRangePercentilesd,\n ShiftIntensityd,\n StdShiftIntensityd,\n ThresholdIntensityd,\n)\nfrom monai.transforms.post.array import KeepLargestConnectedComponent, LabelFilter, LabelToContour\nfrom monai.transforms.post.dictionary import AsDiscreted, KeepLargestConnectedComponentd, LabelFilterd, LabelToContourd\nfrom monai.transforms.spatial.array import (\n Rand2DElastic,\n RandAffine,\n RandAxisFlip,\n RandGridDistortion,\n RandRotate90,\n Resize,\n Spacing,\n)\nfrom monai.transforms.spatial.dictionary import (\n 
Rand2DElasticd,\n RandAffined,\n RandAxisFlipd,\n RandGridDistortiond,\n RandRotate90d,\n Resized,\n Spacingd,\n)\nfrom monai.utils.enums import CommonKeys\nfrom monai.utils.module import optional_import\n\nif TYPE_CHECKING:\n import matplotlib.pyplot as plt\n\n has_matplotlib = True\n\nelse:\n plt, has_matplotlib = optional_import(\"matplotlib.pyplot\")\n\n\ndef get_data(keys):\n \"\"\"Get the example data to be used.\n\n Use MarsAtlas as it only contains 1 image for quick download and\n that image is parcellated.\n \"\"\"\n cache_dir = os.environ.get(\"MONAI_DATA_DIRECTORY\") or tempfile.mkdtemp()\n fname = \"MarsAtlas-MNI-Colin27.zip\"\n url = \"https://www.dropbox.com/s/ndz8qtqblkciole/\" + fname + \"?dl=1\"\n out_path = os.path.join(cache_dir, \"MarsAtlas-MNI-Colin27\")\n zip_path = os.path.join(cache_dir, fname)\n\n download_and_extract(url, zip_path, out_path)\n\n image, label = sorted(glob(os.path.join(out_path, \"*.nii\")))\n\n data = {CommonKeys.IMAGE: image, CommonKeys.LABEL: label}\n\n transforms = Compose(\n [LoadImaged(keys), AddChanneld(keys), ScaleIntensityd(CommonKeys.IMAGE), Rotate90d(keys, spatial_axes=[0, 2])]\n )\n data = transforms(data)\n max_size = max(data[keys[0]].shape)\n padder = SpatialPadd(keys, (max_size, max_size, max_size))\n return padder(data)\n\n\ndef update_docstring(code_path, transform_name):\n \"\"\"\n Find the documentation for a given transform and if it's missing,\n add a pointer to the transform's example image.\n \"\"\"\n with open(code_path) as f:\n contents = f.readlines()\n doc_start = None\n for i, line in enumerate(contents):\n # find the line containing start of the transform documentation\n if \"`\" + transform_name + \"`\" in line:\n doc_start = i\n break\n if doc_start is None:\n raise RuntimeError(\"Couldn't find transform documentation\")\n\n # if image is already in docs, nothing to do\n image_line = doc_start + 2\n if \".. image\" in contents[image_line]:\n return\n\n # add the line for the image and the alt text\n contents_orig = deepcopy(contents)\n contents.insert(\n image_line,\n \".. image:: https://github.com/Project-MONAI/DocImages/raw/main/transforms/\" + transform_name + \".png\\n\",\n )\n contents.insert(image_line + 1, \" :alt: example of \" + transform_name + \"\\n\")\n\n # check that we've only added two lines\n assert len(contents) == len(contents_orig) + 2\n\n # write the updated doc to overwrite the original\n with open(code_path, \"w\") as f:\n f.writelines(contents)\n\n\ndef pre_process_data(data, ndim, is_map, is_post):\n \"\"\"If transform requires 2D data, then convert to 2D\"\"\"\n if ndim == 2:\n for k in keys:\n data[k] = data[k][..., data[k].shape[-1] // 2]\n if is_post:\n for k in keys:\n data[k] = torch.as_tensor(data[k])\n\n if is_map:\n return data\n return data[CommonKeys.LABEL] if is_post else data[CommonKeys.IMAGE]\n\n\ndef get_2d_slice(image, view, is_label):\n \"\"\"If image is 3d, get the central slice. 
If is already 2d, return as-is.\n If image is label, set 0 to np.nan.\n \"\"\"\n if image.ndim == 2:\n out = image\n else:\n shape = image.shape\n slices = [slice(0, s) for s in shape]\n _slice = shape[view] // 2\n slices[view] = slice(_slice, _slice + 1)\n slices = tuple(slices)\n out = np.squeeze(image[slices], view)\n if is_label:\n out[out == 0] = np.nan\n return out\n\n\ndef get_stacked_2d_ims(im, is_label):\n \"\"\"Get the 3 orthogonal views and stack them into 1 image.\n Requires that all images be same size, but this is taken care\n of by the `SpatialPadd` earlier.\n \"\"\"\n return [get_2d_slice(im, i, is_label) for i in range(3)]\n\n\ndef get_stacked_before_after(before, after, is_label=False):\n \"\"\"Stack before and after images into 1 image if 3d.\n Requires that before and after images be the same size.\n \"\"\"\n return [get_stacked_2d_ims(d, is_label) for d in (before, after)]\n\n\ndef save_image(images, labels, filename, transform_name, transform_args, shapes, colorbar=False):\n \"\"\"Save image to file, ensuring there's no whitespace around the edge.\"\"\"\n plt.rcParams.update({\"font.family\": \"monospace\"})\n plt.style.use(\"dark_background\")\n nrow = len(images) # before and after (should always be 2)\n ncol = len(images[0]) # num orthogonal views (either 1 or 3)\n # roughly estimate the height_ratios of the first:second row\n hs = [float(r[0].shape[0]) for r in images]\n fig = plt.figure(tight_layout=True)\n spec = fig.add_gridspec(nrow, ncol, hspace=0, wspace=0, height_ratios=hs)\n for row in range(nrow):\n vmin = min(i.min() for i in images[row])\n vmax = max(i.max() for i in images[row])\n for col in range(ncol):\n ax = fig.add_subplot(spec[row, col])\n imshow = ax.imshow(images[row][col], cmap=\"gray\", vmin=vmin, vmax=vmax)\n ax.set_aspect(\"equal\")\n if colorbar and col == ncol - 1:\n plt.colorbar(imshow, ax=ax)\n if col == 0:\n y_label = \"After\" if row else \"Before\"\n y_label += (\"\\n\" + shapes[row]) if shapes[0] != shapes[1] else \"\"\n ax.set_ylabel(y_label)\n # print yticks for the right most column\n if col != ncol - 1 or colorbar:\n ax.set_yticks([])\n else:\n ax.yaxis.tick_right()\n for n, label in enumerate(ax.yaxis.get_ticklabels()):\n if n > 2:\n label.set_visible(False)\n ax.set_xticks([])\n ax.set_frame_on(False)\n if labels is not None:\n ax.imshow(labels[row][col], cmap=\"hsv\", alpha=0.9, interpolation=\"nearest\")\n # title is e.g., Flipd(keys=keys, spatial_axis=0)\n title = transform_name + \"(\"\n for k, v in transform_args.items():\n title += k + \"=\"\n if isinstance(v, str):\n title += \"'\" + v + \"'\"\n elif isinstance(v, (np.ndarray, torch.Tensor)):\n title += \"[array]\"\n elif isinstance(v, Callable):\n title += \"[callable]\"\n else:\n title += str(v)\n title += \", \"\n if len(transform_args) > 0:\n title = title[:-2]\n title += \")\"\n # shorten the lines\n title = textwrap.fill(title, 50, break_long_words=False, subsequent_indent=\" \" * (len(transform_name) + 1))\n fig.suptitle(title, x=0.1, horizontalalignment=\"left\")\n fig.savefig(filename)\n plt.close(fig)\n\n\ndef get_images(data, is_label=False):\n \"\"\"Get image. If is dictionary, extract key. If is list, stack. If both dictionary and list, do both.\n Also return the image size as string to be used im the imshow. 
If it's a list, return `N x (H,W,D)`.\n \"\"\"\n # If not a list, convert\n if not isinstance(data, list):\n data = [data]\n key = CommonKeys.LABEL if is_label else CommonKeys.IMAGE\n is_map = isinstance(data[0], dict)\n # length of the list will be equal to number of samples produced. This will be 1 except for transforms that\n # produce `num_samples`.\n data = [d[key] if is_map else d for d in data]\n data = [d[0] for d in data] # remove channel component\n\n # for each sample, create a list of the orthogonal views. If image is 2d, length will be 1. If 3d, there\n # will be three orthogonal views\n num_samples = len(data)\n num_orthog_views = 3 if data[0].ndim == 3 else 1\n shape_str = (f\"{num_samples} x \" if num_samples > 1 else \"\") + str(data[0].shape)\n for i in range(num_samples):\n data[i] = [get_2d_slice(data[i], view, is_label) for view in range(num_orthog_views)]\n\n out = []\n if num_samples == 1:\n out = data[0]\n else:\n # we might need to panel the images. this happens if a transform produces e.g. 4 output images.\n # In this case, we create a 2-by-2 grid from them. Output will be a list containing n_orthog_views,\n # each element being either the image (if num_samples is 1) or the panelled image.\n nrows = int(np.floor(num_samples ** 0.5))\n for view in range(num_orthog_views):\n result = np.asarray([d[view] for d in data])\n nindex, height, width = result.shape\n ncols = nindex // nrows\n # only implemented for square number of images (e.g. 4 images goes to a 2-by-2 panel)\n if nindex != nrows * ncols:\n raise NotImplementedError\n # want result.shape = (height*nrows, width*ncols), have to be careful about striding\n result = result.reshape(nrows, ncols, height, width).swapaxes(1, 2).reshape(height * nrows, width * ncols)\n out.append(result)\n return out, shape_str\n\n\ndef create_transform_im(\n transform, transform_args, data, ndim=3, colorbar=False, update_doc=True, out_dir=None, seed=0, is_post=False\n):\n \"\"\"Create an image with the before and after of the transform.\n Also update the transform's documentation to point to this image.\"\"\"\n\n transform = transform(**transform_args)\n\n if not has_matplotlib:\n raise RuntimeError\n\n if isinstance(transform, Randomizable):\n # increment the seed for map transforms so they're different to the array versions.\n seed = seed + 1 if isinstance(transform, MapTransform) else seed\n transform.set_random_state(seed)\n\n out_dir = os.environ.get(\"MONAI_DOC_IMAGES\")\n if out_dir is None:\n raise RuntimeError(\n \"Please git clone https://github.com/Project-MONAI/DocImages\"\n + \" and then set the environment variable `MONAI_DOC_IMAGES`\"\n )\n out_dir = os.path.join(out_dir, \"transforms\")\n\n # Path is transform name\n transform_name = transform.__class__.__name__\n out_fname = transform_name + \".png\"\n out_file = os.path.join(out_dir, out_fname)\n\n is_map = isinstance(transform, MapTransform)\n data_in = pre_process_data(deepcopy(data), ndim, is_map, is_post)\n\n data_tr = transform(deepcopy(data_in))\n\n images_before, before_shape = get_images(data_in)\n images_after, after_shape = get_images(data_tr)\n images = (images_before, images_after)\n shapes = (before_shape, after_shape)\n\n labels = None\n if is_map:\n labels_before, *_ = get_images(data_in, is_label=True)\n labels_after, *_ = get_images(data_tr, is_label=True)\n labels = (labels_before, labels_after)\n\n save_image(images, labels, out_file, transform_name, transform_args, shapes, colorbar)\n\n if update_doc:\n base_dir = 
pathlib.Path(__file__).parent.parent.parent\n rst_path = os.path.join(base_dir, \"docs\", \"source\", \"transforms.rst\")\n update_docstring(rst_path, transform_name)\n\n\nif __name__ == \"__main__\":\n\n keys = [CommonKeys.IMAGE, CommonKeys.LABEL]\n data = get_data(keys)\n create_transform_im(RandFlip, dict(prob=1, spatial_axis=1), data)\n create_transform_im(RandFlipd, dict(keys=keys, prob=1, spatial_axis=2), data)\n create_transform_im(Flip, dict(spatial_axis=1), data)\n create_transform_im(Flipd, dict(keys=keys, spatial_axis=2), data)\n create_transform_im(Flipd, dict(keys=keys, spatial_axis=2), data)\n create_transform_im(Orientation, dict(axcodes=\"RPI\", image_only=True), data)\n create_transform_im(Orientationd, dict(keys=keys, axcodes=\"RPI\"), data)\n create_transform_im(\n Rand3DElastic, dict(prob=1.0, sigma_range=(1, 2), magnitude_range=(0.5, 0.5), shear_range=(1, 1, 1)), data\n )\n create_transform_im(Affine, dict(shear_params=(0, 0.5, 0), image_only=True, padding_mode=\"zeros\"), data)\n create_transform_im(\n Affined, dict(keys=keys, shear_params=(0, 0.5, 0), mode=[\"bilinear\", \"nearest\"], padding_mode=\"zeros\"), data\n )\n create_transform_im(RandAffine, dict(prob=1, shear_range=(0.5, 0.5), padding_mode=\"zeros\"), data)\n create_transform_im(\n RandAffined,\n dict(keys=keys, prob=1, shear_range=(0.5, 0.5), mode=[\"bilinear\", \"nearest\"], padding_mode=\"zeros\"),\n data,\n )\n create_transform_im(\n Rand3DElastic, dict(sigma_range=(5, 7), magnitude_range=(50, 150), prob=1, padding_mode=\"zeros\"), data\n )\n create_transform_im(\n Rand2DElastic, dict(prob=1, spacing=(20, 20), magnitude_range=(1, 2), padding_mode=\"zeros\"), data, 2\n )\n create_transform_im(\n Rand2DElasticd,\n dict(\n keys=keys,\n prob=1,\n spacing=(20, 20),\n magnitude_range=(1, 2),\n padding_mode=\"zeros\",\n mode=[\"bilinear\", \"nearest\"],\n ),\n data,\n 2,\n )\n create_transform_im(\n Rand3DElasticd,\n dict(\n keys=keys,\n sigma_range=(5, 7),\n magnitude_range=(50, 150),\n prob=1,\n padding_mode=\"zeros\",\n mode=[\"bilinear\", \"nearest\"],\n ),\n data,\n )\n create_transform_im(Rotate90, dict(spatial_axes=(1, 2)), data)\n create_transform_im(Rotate90d, dict(keys=keys, spatial_axes=(1, 2)), data)\n create_transform_im(RandRotate90, dict(prob=1), data)\n create_transform_im(RandRotate90d, dict(keys=keys, prob=1), data)\n create_transform_im(Rotate, dict(angle=0.1), data)\n create_transform_im(Rotated, dict(keys=keys, angle=0.1, mode=[\"bilinear\", \"nearest\"]), data)\n create_transform_im(RandRotate, dict(prob=1, range_x=[0.4, 0.4]), data)\n create_transform_im(RandRotated, dict(keys=keys, prob=1, range_x=[0.4, 0.4], mode=[\"bilinear\", \"nearest\"]), data)\n create_transform_im(Zoom, dict(zoom=0.6), data)\n create_transform_im(Zoomd, dict(keys=keys, zoom=1.3, mode=[\"area\", \"nearest\"]), data)\n create_transform_im(RandZoom, dict(prob=1, min_zoom=0.6, max_zoom=0.8), data)\n create_transform_im(RandZoomd, dict(keys=keys, prob=1, min_zoom=1.3, max_zoom=1.5, mode=[\"area\", \"nearest\"]), data)\n create_transform_im(ScaleIntensity, dict(minv=0, maxv=10), data, colorbar=True)\n create_transform_im(ScaleIntensityd, dict(keys=CommonKeys.IMAGE, minv=0, maxv=10), data, colorbar=True)\n create_transform_im(RandScaleIntensity, dict(prob=1.0, factors=(5, 10)), data, colorbar=True)\n create_transform_im(\n RandScaleIntensityd, dict(keys=CommonKeys.IMAGE, prob=1.0, factors=(5, 10)), data, colorbar=True\n )\n create_transform_im(DivisiblePad, dict(k=64), data)\n create_transform_im(DivisiblePadd, 
dict(keys=keys, k=64), data)\n create_transform_im(CropForeground, dict(), data)\n create_transform_im(CropForegroundd, dict(keys=keys, source_key=CommonKeys.IMAGE), data)\n create_transform_im(RandGaussianNoise, dict(prob=1, mean=0, std=0.1), data)\n create_transform_im(RandGaussianNoised, dict(keys=CommonKeys.IMAGE, prob=1, mean=0, std=0.1), data)\n create_transform_im(KSpaceSpikeNoise, dict(loc=(100, 100, 100), k_intensity=13), data)\n create_transform_im(KSpaceSpikeNoised, dict(keys=CommonKeys.IMAGE, loc=(100, 100, 100), k_intensity=13), data)\n create_transform_im(RandKSpaceSpikeNoise, dict(prob=1, intensity_range=(10, 13)), data)\n create_transform_im(\n RandKSpaceSpikeNoised,\n dict(keys=CommonKeys.IMAGE, global_prob=1, prob=1, common_sampling=True, intensity_range=(13, 15)),\n data,\n )\n create_transform_im(GibbsNoise, dict(alpha=0.8), data)\n create_transform_im(GibbsNoised, dict(keys=CommonKeys.IMAGE, alpha=0.8), data)\n create_transform_im(RandGibbsNoise, dict(prob=1.0, alpha=(0.6, 0.8)), data)\n create_transform_im(RandGibbsNoised, dict(keys=CommonKeys.IMAGE, prob=1.0, alpha=(0.6, 0.8)), data)\n create_transform_im(ShiftIntensity, dict(offset=1), data, colorbar=True)\n create_transform_im(ShiftIntensityd, dict(keys=CommonKeys.IMAGE, offset=1), data, colorbar=True)\n create_transform_im(RandShiftIntensity, dict(prob=1.0, offsets=(10, 20)), data, colorbar=True)\n create_transform_im(\n RandShiftIntensityd, dict(keys=CommonKeys.IMAGE, prob=1.0, offsets=(10, 20)), data, colorbar=True\n )\n create_transform_im(StdShiftIntensity, dict(factor=10), data, colorbar=True)\n create_transform_im(StdShiftIntensityd, dict(keys=CommonKeys.IMAGE, factor=10), data, colorbar=True)\n create_transform_im(RandStdShiftIntensity, dict(prob=1.0, factors=(5, 10)), data, colorbar=True)\n create_transform_im(\n RandStdShiftIntensityd, dict(keys=CommonKeys.IMAGE, prob=1.0, factors=(5, 10)), data, colorbar=True\n )\n create_transform_im(RandBiasField, dict(prob=1, coeff_range=(0.2, 0.3)), data)\n create_transform_im(RandBiasFieldd, dict(keys=CommonKeys.IMAGE, prob=1, coeff_range=(0.2, 0.3)), data)\n create_transform_im(NormalizeIntensity, dict(subtrahend=0, divisor=10), data, colorbar=True)\n create_transform_im(NormalizeIntensityd, dict(keys=CommonKeys.IMAGE, subtrahend=0, divisor=10), data, colorbar=True)\n create_transform_im(ThresholdIntensity, dict(threshold=0.4, above=False, cval=0.9), data, colorbar=True)\n create_transform_im(\n ThresholdIntensityd, dict(keys=CommonKeys.IMAGE, threshold=0.4, above=False, cval=0.9), data, colorbar=True\n )\n create_transform_im(ScaleIntensityRange, dict(a_min=0, a_max=1, b_min=1, b_max=10), data, colorbar=True)\n create_transform_im(\n ScaleIntensityRanged, dict(keys=CommonKeys.IMAGE, a_min=0, a_max=1, b_min=1, b_max=10), data, colorbar=True\n )\n create_transform_im(ScaleIntensityRangePercentiles, dict(lower=5, upper=95, b_min=1, b_max=10), data, colorbar=True)\n create_transform_im(\n ScaleIntensityRangePercentilesd,\n dict(keys=CommonKeys.IMAGE, lower=5, upper=95, b_min=1, b_max=10),\n data,\n colorbar=True,\n )\n create_transform_im(AdjustContrast, dict(gamma=2), data, colorbar=True)\n create_transform_im(AdjustContrastd, dict(keys=CommonKeys.IMAGE, gamma=2), data, colorbar=True)\n create_transform_im(RandAdjustContrast, dict(prob=1, gamma=(1.5, 2)), data, colorbar=True)\n create_transform_im(RandAdjustContrastd, dict(keys=CommonKeys.IMAGE, prob=1, gamma=(1.5, 2)), data, colorbar=True)\n create_transform_im(MaskIntensity, dict(mask_data=data[CommonKeys.IMAGE], 
select_fn=lambda x: x > 0.3), data)\n create_transform_im(\n MaskIntensityd, dict(keys=CommonKeys.IMAGE, mask_key=CommonKeys.IMAGE, select_fn=lambda x: x > 0.3), data\n )\n create_transform_im(GaussianSmooth, dict(sigma=2), data)\n create_transform_im(GaussianSmoothd, dict(keys=CommonKeys.IMAGE, sigma=2), data)\n create_transform_im(RandGaussianSmooth, dict(prob=1.0, sigma_x=(1, 2)), data)\n create_transform_im(RandGaussianSmoothd, dict(keys=CommonKeys.IMAGE, prob=1.0, sigma_x=(1, 2)), data)\n create_transform_im(GaussianSharpen, dict(), GaussianSmoothd(CommonKeys.IMAGE, 2)(data))\n create_transform_im(GaussianSharpend, dict(keys=CommonKeys.IMAGE), GaussianSmoothd(CommonKeys.IMAGE, 2)(data))\n create_transform_im(RandGaussianSharpen, dict(prob=1), GaussianSmoothd(CommonKeys.IMAGE, 2)(data))\n create_transform_im(\n RandGaussianSharpend, dict(keys=CommonKeys.IMAGE, prob=1), GaussianSmoothd(CommonKeys.IMAGE, 2)(data)\n )\n create_transform_im(RandHistogramShift, dict(prob=1, num_control_points=3), data, colorbar=True)\n create_transform_im(\n RandHistogramShiftd, dict(keys=CommonKeys.IMAGE, prob=1, num_control_points=3), data, colorbar=True\n )\n create_transform_im(RandCoarseDropout, dict(prob=1, holes=200, spatial_size=20, fill_value=0), data)\n create_transform_im(\n RandCoarseDropoutd, dict(keys=CommonKeys.IMAGE, prob=1, holes=200, spatial_size=20, fill_value=0), data\n )\n create_transform_im(RandCoarseShuffle, dict(prob=1, holes=200, spatial_size=20), data)\n create_transform_im(RandCoarseShuffled, dict(keys=CommonKeys.IMAGE, prob=1, holes=200, spatial_size=20), data)\n create_transform_im(HistogramNormalize, dict(num_bins=10), data)\n create_transform_im(HistogramNormalized, dict(keys=CommonKeys.IMAGE, num_bins=10), data)\n create_transform_im(SpatialPad, dict(spatial_size=(300, 300, 300)), data)\n create_transform_im(SpatialPadd, dict(keys=keys, spatial_size=(300, 300, 300)), data)\n create_transform_im(BorderPad, dict(spatial_border=10), data)\n create_transform_im(BorderPadd, dict(keys=keys, spatial_border=10), data)\n create_transform_im(SpatialCrop, dict(roi_center=(75, 75, 75), roi_size=(100, 100, 100)), data)\n create_transform_im(SpatialCropd, dict(keys=keys, roi_center=(75, 75, 75), roi_size=(100, 100, 100)), data)\n create_transform_im(CenterSpatialCrop, dict(roi_size=(100, 100, 100)), data)\n create_transform_im(CenterSpatialCropd, dict(keys=keys, roi_size=(100, 100, 100)), data)\n create_transform_im(RandSpatialCrop, dict(roi_size=(100, 100, 100), random_size=False), data)\n create_transform_im(RandSpatialCropd, dict(keys=keys, roi_size=(100, 100, 100), random_size=False), data)\n create_transform_im(RandSpatialCropSamples, dict(num_samples=4, roi_size=(100, 100, 100), random_size=False), data)\n create_transform_im(\n RandSpatialCropSamplesd, dict(keys=keys, num_samples=4, roi_size=(100, 100, 100), random_size=False), data\n )\n create_transform_im(\n RandWeightedCrop, dict(spatial_size=(100, 100, 100), num_samples=4, weight_map=data[CommonKeys.IMAGE] > 0), data\n )\n create_transform_im(\n RandWeightedCropd, dict(keys=keys, spatial_size=(100, 100, 100), num_samples=4, w_key=CommonKeys.IMAGE), data\n )\n create_transform_im(\n RandCropByPosNegLabel,\n dict(spatial_size=(100, 100, 100), label=data[CommonKeys.LABEL], neg=0, num_samples=4),\n data,\n )\n create_transform_im(\n RandCropByPosNegLabeld,\n dict(keys=keys, spatial_size=(100, 100, 100), label_key=CommonKeys.LABEL, neg=0, num_samples=4),\n data,\n )\n create_transform_im(\n RandCropByLabelClasses,\n dict(\n 
spatial_size=(100, 100, 100), label=data[CommonKeys.LABEL] > 0, num_classes=2, ratios=[0, 1], num_samples=4\n ),\n data,\n )\n create_transform_im(\n RandCropByLabelClassesd,\n dict(\n keys=keys,\n spatial_size=(100, 100, 100),\n label_key=CommonKeys.LABEL,\n num_classes=2,\n ratios=[0, 1],\n num_samples=4,\n ),\n data,\n )\n create_transform_im(ResizeWithPadOrCrop, dict(spatial_size=(100, 100, 100)), data)\n create_transform_im(ResizeWithPadOrCropd, dict(keys=keys, spatial_size=(100, 100, 100)), data)\n create_transform_im(RandScaleCrop, dict(roi_scale=0.4), data)\n create_transform_im(RandScaleCropd, dict(keys=keys, roi_scale=0.4), data)\n create_transform_im(CenterScaleCrop, dict(roi_scale=0.4), data)\n create_transform_im(CenterScaleCropd, dict(keys=keys, roi_scale=0.4), data)\n create_transform_im(\n AsDiscrete, dict(num_classes=2, threshold_values=True, logit_thresh=10), data, is_post=True, colorbar=True\n )\n create_transform_im(\n AsDiscreted,\n dict(keys=CommonKeys.LABEL, num_classes=2, threshold_values=True, logit_thresh=10),\n data,\n is_post=True,\n )\n create_transform_im(LabelFilter, dict(applied_labels=(1, 2, 3, 4, 5, 6)), data, is_post=True)\n create_transform_im(\n LabelFilterd, dict(keys=CommonKeys.LABEL, applied_labels=(1, 2, 3, 4, 5, 6)), data, is_post=True\n )\n create_transform_im(LabelToContour, dict(), data, is_post=True)\n create_transform_im(LabelToContourd, dict(keys=CommonKeys.LABEL), data, is_post=True)\n create_transform_im(Spacing, dict(pixdim=(5, 5, 5), image_only=True), data)\n create_transform_im(Spacingd, dict(keys=keys, pixdim=(5, 5, 5), mode=[\"bilinear\", \"nearest\"]), data)\n create_transform_im(RandAxisFlip, dict(prob=1), data)\n create_transform_im(RandAxisFlipd, dict(keys=keys, prob=1), data)\n create_transform_im(Resize, dict(spatial_size=(100, 100, 100)), data)\n create_transform_im(Resized, dict(keys=keys, spatial_size=(100, 100, 100), mode=[\"area\", \"nearest\"]), data)\n data_binary = deepcopy(data)\n data_binary[CommonKeys.LABEL] = (data_binary[CommonKeys.LABEL] > 0).astype(np.float32)\n create_transform_im(KeepLargestConnectedComponent, dict(applied_labels=1), data_binary, is_post=True, ndim=2)\n create_transform_im(\n KeepLargestConnectedComponentd, dict(keys=CommonKeys.LABEL, applied_labels=1), data_binary, is_post=True, ndim=2\n )\n create_transform_im(RandGridDistortion, dict(num_cells=3, prob=1.0, distort_limit=(-0.1, 0.1)), data)\n create_transform_im(\n RandGridDistortiond,\n dict(keys=keys, num_cells=4, prob=1.0, distort_limit=(-0.2, 0.2), mode=[\"bilinear\", \"nearest\"]),\n data,\n )\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import VoteEnsembled\n\n# shape: [1, 2, 1, 1]\nTEST_CASE_1 = [\n {\"keys\": [\"pred0\", \"pred1\", \"pred2\"], \"output_key\": \"output\", \"num_classes\": None},\n {\n \"pred0\": torch.tensor([[[[1]], [[0]]]]),\n \"pred1\": torch.tensor([[[[1]], [[0]]]]),\n \"pred2\": torch.tensor([[[[0]], [[1]]]]),\n },\n torch.tensor([[[[1.0]], [[0.0]]]]),\n]\n\n# shape: [1, 2, 1, 1]\nTEST_CASE_2 = [\n {\"keys\": \"output\", \"output_key\": \"output\", \"num_classes\": None},\n {\n \"output\": torch.stack(\n [torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[0]], [[1]]]])]\n )\n },\n torch.tensor([[[[1.0]], [[0.0]]]]),\n]\n\n# shape: [1, 2, 1]\nTEST_CASE_3 = [\n {\"keys\": [\"pred0\", \"pred1\", \"pred2\"], \"output_key\": \"output\", \"num_classes\": 3},\n {\"pred0\": torch.tensor([[[0], [2]]]), \"pred1\": torch.tensor([[[0], [2]]]), \"pred2\": torch.tensor([[[1], [1]]])},\n torch.tensor([[[0], [2]]]),\n]\n\n# shape: [1, 2, 1]\nTEST_CASE_4 = [\n {\"keys\": [\"pred0\", \"pred1\", \"pred2\"], \"output_key\": \"output\", \"num_classes\": 5},\n {\"pred0\": torch.tensor([[[0], [2]]]), \"pred1\": torch.tensor([[[0], [2]]]), \"pred2\": torch.tensor([[[1], [1]]])},\n torch.tensor([[[0], [2]]]),\n]\n\n# shape: [1]\nTEST_CASE_5 = [\n {\"keys\": [\"pred0\", \"pred1\", \"pred2\"], \"output_key\": \"output\", \"num_classes\": 3},\n {\"pred0\": torch.tensor([2]), \"pred1\": torch.tensor([2]), \"pred2\": torch.tensor([1])},\n torch.tensor([2]),\n]\n\n\nclass TestVoteEnsembled(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5])\n def test_value(self, input_param, img, expected_value):\n result = VoteEnsembled(**input_param)(img)\n torch.testing.assert_allclose(result[\"output\"], expected_value)\n\n def test_cuda_value(self):\n img = torch.stack(\n [torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[0]], [[1]]]])]\n )\n expected_value = torch.tensor([[[[1.0]], [[0.0]]]])\n if torch.cuda.is_available():\n img = img.to(torch.device(\"cuda:0\"))\n expected_value = expected_value.to(torch.device(\"cuda:0\"))\n result = VoteEnsembled(keys=\"output\", num_classes=None)({\"output\": img})\n torch.testing.assert_allclose(result[\"output\"], expected_value)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.asarray",
"numpy.squeeze",
"matplotlib.pyplot.colorbar",
"torch.as_tensor",
"matplotlib.pyplot.close",
"numpy.floor",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
],
[
"torch.device",
"torch.testing.assert_allclose",
"torch.cuda.is_available",
"torch.tensor"
]
] |
joseppinilla/embedding-methods
|
[
"a29fea829ec33d586ccbc5c9b6ec6586c2a2154f"
] |
[
"embera/transform/embedding.py"
] |
[
"import minorminer\n\nimport numpy as np\n\nfrom embera.utilities.decorators import nx_graph\nfrom embera.preprocess.tiling_parser import DWaveNetworkXTiling\n\n__all__ = ['translate','mirror','rotate','spread_out','open_seam',\n 'iter_sliding_window', 'greedy_fit','reconnect']\n\n\"\"\" ################### Naive Embedding Transformations ####################\n Transformation methods for embeddings onto Tiled D-Wave Architectures\n\n Arguments:\n T: (networkx.Graph)\n A NetworkX Graph with the construction parameters generated using\n `dwave_networkx`:\n family : {'chimera','pegasus', ...}\n rows : (int)\n columns : (int)\n labels : {'coordinate', 'int', 'nice'}\n\n embedding: (dict)\n A dictionary mapping variable names to lists of labels in T\n\n Note:\n A valid embedding is not guaranteed from these transformations. To\n generate a valid embedding from the result of this transformation, use\n `embera.transform.embedding.reconnect(S,T,new_embedding)` or similar.\n\n\"\"\"\n\ndef translate(T, embedding, origin=(0,0)):\n \"\"\" Transport the embedding on the same graph to re-distribute qubit\n assignments.\n\n Optional arguments:\n origin: (tuple)\n A tuple of tile coordinates pointing to where the left-uppermost\n occupied tile in the embedding should move to. All other tiles\n are moved relative to the origin.\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(11)\n >>> T = dnx.chimera_graph(7)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> origin = (2,3)\n >>> new_embedding = embera.transform.embedding.translate(T,embedding,origin)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n shape = tiling.shape\n # Initialize offset\n offset = shape\n # Find margins\n for v,chain in embedding.items():\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n offset = [min(t,o) for t,o in zip(tile,offset)]\n # Define flips\n m,n = tiling.shape\n t = tiling.graph['tile']\n new_embedding = {}\n for v,chain in embedding.items():\n new_chain = []\n for q in chain:\n k = tiling.get_k(q)\n tile = tiling.get_tile(q)\n shore = tiling.get_shore(q)\n new_tile = tuple(np.array(tile) - np.array(offset) + np.array(origin))\n new_q = tiling.set_tile(q,new_tile)\n new_chain.append(new_q)\n new_embedding[v] = new_chain\n\n return new_embedding\n\n\ndef mirror(T, embedding, axis=0):\n \"\"\" Flip the embedding on the same graph to re-distribute qubit\n assignments.\n\n Optional arguments:\n\n axis: {0,1}\n 0 toflip on horizontal and 1 to flip on vertical\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(11)\n >>> T = dnx.chimera_graph(7)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> axis = 1\n >>> new_embedding = embera.transform.embedding.mirror(T,embedding,axis)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n shape = np.array(tiling.shape)\n # Define flips\n m,n = tiling.shape\n t = tiling.graph['tile']\n if axis is 0:\n new_tile = lambda i,j: (i,n-j-1)\n new_k = lambda k,shore: k if shore else t-k-1\n elif axis is 1:\n new_tile = lambda i,j: (m-i-1,j)\n new_k = lambda k,shore: t-k-1 if shore else k\n else:\n raise ValueError(\"Value of axis not supported\")\n # Mirror all qubits by chain\n new_embedding = {}\n for 
v,chain in embedding.items():\n new_chain = []\n for q in chain:\n k = tiling.get_k(q)\n tile = tiling.get_tile(q)\n shore = tiling.get_shore(q)\n new_coordinates = (new_tile(*tile),shore,new_k(k,shore))\n new_chain.append(next(tiling.get_qubits(*new_coordinates)))\n new_embedding[v] = new_chain\n\n return new_embedding\n\ndef rotate(T, embedding, theta=90):\n \"\"\" Rotate the embedding on the same graph to re-distribute qubit\n assignments. If a perfect fit isn't found, due to disabled qubits,\n the invalid embedding is still returned.\n\n Optional arguments:\n\n theta: ({0,90,180,270,360,-90,-180,-270})\n Rotation angle.\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(11)\n >>> T = dnx.chimera_graph(7)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> theta = 270\n >>> new_embedding = embera.transform.embedding.rotate(T,embedding,theta)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n shape = np.array(tiling.shape)\n # Define rotations\n m,n = tiling.shape\n t = tiling.graph['tile']\n if theta in [90,-270]:\n new_tile = lambda i,j: (j, m-i-1)\n new_shore = lambda shore: 0 if shore else 1\n new_k = lambda k,shore: t-k-1 if shore else k\n elif theta in [180,-180]:\n new_tile = lambda i,j: (m-i-1,n-j-1)\n new_shore = lambda shore: shore\n new_k = lambda k,shore: t-k-1\n elif theta in [-90,270]:\n new_tile = lambda i,j: (n-j-1, i)\n new_shore = lambda shore: 0 if shore else 1\n new_k = lambda k,shore: k if shore else t-k-1\n elif theta in [0,360]:\n return embedding\n else:\n raise ValueError(\"Value of theta not supported\")\n # Rotate all qubits by chain\n new_embedding = {}\n for v,chain in embedding.items():\n new_chain = []\n for q in chain:\n k = tiling.get_k(q)\n tile = tiling.get_tile(q)\n shore = tiling.get_shore(q)\n new_coordinates = (new_tile(*tile),new_shore(shore),new_k(k,shore))\n new_chain.append(next(tiling.get_qubits(*new_coordinates)))\n new_embedding[v] = new_chain\n\n return new_embedding\n\ndef spread_out(T, embedding, sheer=None):\n \"\"\" Transform the tile assignment to spread out the embedding starting from\n tile (0,0) and placing originally adjacent tiles 1 extra tile away.\n\n Optional arguments:\n\n sheer: {None,0, 1}\n Perform a translation of every odd column (sheer=0) or row\n (sheer=1)\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(17)\n >>> T = dnx.chimera_graph(8)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding, node_size=10)\n >>> new_embedding = embera.transform.embedding.spread_out(T,embedding)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n shape = np.array(tiling.shape)\n # Initialize edges\n origin = shape\n end = (0,)*len(origin)\n # Find edges\n for v,chain in embedding.items():\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n origin = [min(t,o) for t,o in zip(tile,origin)]\n end = [max(t,e) for t,e in zip(tile,end)]\n # Make sure it fits\n if tuple((np.array(end)-np.array(origin))*2) > tiling.shape:\n raise RuntimeError(\"Can't spread out\")\n # Spread out all qubits by chain\n new_embedding = {}\n if sheer is None:\n shift = lambda tile,origin: (tile-origin)*2\n elif sheer == 0:\n shift = lambda tile,origin: (tile-origin)*2+np.flip((tile-origin)%[2,1])\n elif 
sheer == 1:\n shift = lambda tile,origin: (tile-origin)*2+np.flip((tile-origin)%[1,2])\n else:\n raise ValueError(\"Value of sheer not supported\")\n\n for v,chain in embedding.items():\n new_chain = []\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n new_tile = tuple(shift(tile,origin))\n new_q = tiling.set_tile(q,new_tile)\n new_chain.append(new_q)\n new_embedding[v] = new_chain\n\n return new_embedding\n\ndef open_seam(T, embedding, seam, direction):\n \"\"\"\n Arguments (continued):\n seam: (int)\n If direction is 'left' or 'right', seam corresponds to the\n column number. If direction is 'up' or 'down', seam corresponds\n to the row number.\n\n direction: (None or str:{'left','right','up','down'})\n Given a seam index, that column/row is cleared and all utilized\n qubits in the embedding are shifted in this direction.\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(10)\n >>> T = dnx.chimera_graph(8)\n >>> embedding = minorminer.find_embedding(S,T,random_seed=10)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> seam = 2\n >>> direction = 'right'\n >>> new_embedding = embera.transform.embedding.open_seam(T,embedding,seam,direction)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n\n if direction == 'left':\n shift = lambda tile: tile[1]<=seam\n offset = np.array([0,-1])\n elif direction == 'right':\n shift = lambda tile: tile[1]>=seam\n offset = np.array([0,+1])\n elif direction == 'up':\n shift = lambda tile: tile[0]<=seam\n offset = np.array([-1,0])\n elif direction == 'down':\n shift = lambda tile: tile[0]>=seam\n offset = np.array([+1,0])\n else:\n raise ValueError(\"Direction not in {'left','right','up','down'}\")\n\n new_embedding = {}\n for v,chain in embedding.items():\n new_chain = []\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n new_tile = tuple(tile + offset) if shift(tile) else tuple(tile)\n new_q = tiling.set_tile(q,new_tile)\n new_chain.append(new_q)\n new_embedding[v] = new_chain\n\n return new_embedding\n\ndef iter_sliding_window(T, embedding):\n \"\"\" Use a sliding window approach to iteratively transport the embedding\n from one region of the Chimera graph to another.\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> import matplotlib.pyplot as plt\n >>> S = nx.complete_graph(11)\n >>> T = dnx.chimera_graph(7)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> slide = embera.transform.embedding.iter_sliding_window(T,embedding)\n >>> for new_embedding in slide:\n ... dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n ... 
plt.pause(0.2)\n \"\"\"\n tiling = DWaveNetworkXTiling(T)\n shape = np.array(tiling.shape)\n # Initialize edges\n origin = shape\n end = (0,)*len(origin)\n # Find edges\n for v,chain in embedding.items():\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n origin = [min(t,o) for t,o in zip(tile,origin)]\n end = [max(t,e) for t,e in zip(tile,end)]\n\n # Move tiles to origin and translate to try and find valid embedding\n size = np.array(end) - np.array(origin)\n for x in range(shape[1]-size[1]):\n for y in range(shape[0]-size[0]):\n slide = {}\n offset = np.array([x,y])\n # Translate all qubits\n for v,chain in embedding.items():\n new_chain = []\n for q in chain:\n tile = np.array(tiling.get_tile(q))\n new_tile = tuple(tile - np.array(origin) + offset)\n new_q = tiling.set_tile(q,new_tile)\n new_chain.append(new_q)\n slide[v] = new_chain\n yield slide\n\n\"\"\" ########################### Optimize Embedding #########################\n Transformation methods to try and find a valid embedding from an invalid one\n\n Arguments:\n S: (networkx.Graph or list of 2-tuples)\n A NetworkX Graph with the adjacency of the embedded graph, or a list\n of edges.\n\n T: (networkx.Graph)\n A NetworkX Graph with the construction parameters generated using\n `dwave_networkx`:\n family : {'chimera','pegasus', ...}\n rows : (int)\n columns : (int)\n labels : {'coordinate', 'int', 'nice'}\n\n embedding: (dict)\n A dictionary mapping variable names to lists of labels in T\n\"\"\"\n@nx_graph(0)\ndef lp_chain_reduce(S, T, embedding):\n \"\"\" TODO: Use a linear programming formulation to resolve shorter chains\n from a given embedding.\n 1) Turn chains into shared qubits\n 2) Create LP formulation\n 3) Resolve chains\n \"\"\"\n import warnings\n warnings.warn(\"WIP: Not implemented yet\")\n return embedding\n\n@nx_graph(0)\ndef greedy_fit(S, T, embedding):\n \"\"\" Using a sliding window approach, transform the embedding from one region\n of the Chimera graph to another. 
This is useful when an embedding is\n done for a D-Wave machine and it's necessary to find an identical\n embedding on another D-Wave machine with different yield.\n\n Algorithm:\n 1) Parse embedding and target graph to find margins.\n 2) Move qubits to window i and check if nodes are available\n 3) If all edges are available, return embedding, else go to 4\n 4) Test same window with 90, 180, and 270 rotations.\n\n Example:\n >>> import embera\n >>> import networkx as nx\n >>> import dwave_networkx as dnx\n >>> S = nx.complete_graph(11)\n >>> T = dnx.chimera_graph(7)\n >>> embedding = minorminer.find_embedding(S,T)\n >>> dnx.draw_chimera_embedding(T,embedding,node_size=10)\n >>> new_embedding = embera.transform.embedding.greedy_fit(S,T,embedding)\n >>> dnx.draw_chimera_embedding(T,new_embedding,node_size=10)\n \"\"\"\n interactions = lambda u,v,E:((s,t) for s in E[u] for t in E[v])\n is_connected = lambda edges: any(T.has_edge(s,t) for s,t in edges)\n for emb in iter_sliding_window(T,embedding):\n if all(is_connected(interactions(u,v,emb)) for u,v in S.edges):\n return emb\n mir = mirror(T,emb)\n if all(is_connected(interactions(u,v,mir)) for u,v in S.edges):\n return mir\n e90 = rotate(T,emb,90)\n if all(is_connected(interactions(u,v,e90)) for u,v in S.edges):\n return e90\n e180 = rotate(T,emb,180)\n if all(is_connected(interactions(u,v,e180)) for u,v in S.edges):\n return e180\n e270 = rotate(T,emb,270)\n if all(is_connected(interactions(u,v,e270)) for u,v in S.edges):\n return e270\n return {}\n\ndef reconnect(S, T, embedding, return_overlap=False):\n \"\"\" Perform a short run of minorminer to find a valid embedding \"\"\"\n # Assign current embedding to suspend_chains to preserve the layout\n suspend_chains = {k:[[q] for q in chain if q in T] for k,chain in embedding.items()}\n # Run minorminer as a short run without chainlength optimization\n miner_params = {'suspend_chains':suspend_chains,\n 'return_overlap':return_overlap}\n return minorminer.find_embedding(S,T,**miner_params)\n"
] |
[
[
"numpy.array",
"numpy.flip"
]
] |
mkaywong/NNSolver
|
[
"13e43584ec96004441a4877c5f24af270c579f20"
] |
[
"NNSolver/Utilities.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as pt\n\ndef ImageToArray(batchVol,kernalShape,stride,outArray):\n \"\"\"\n This procedure convert a 4D volume into a 2D array for convolution computation.\n Row major is assumed. That is, data contiquous along the channel C in memory.\n \n batchVol: A batch of 3 D volume - batch size N x height H x width W x channel C\n batchVol needs to be padded accordingly before calling this routine\n kernalShape: (k_h,k_w)\n stride: distance between kernal application, same distance for height and width dimension\n outArray: array to store the converted 2D array\n \"\"\"\n N,H,W,C = batchVol.shape\n k_h,k_w = kernalShape\n H_out = int((H-k_h)/stride)+1\n W_out = int((W-k_w)/stride)+1\n ex_h = N*H_out*W_out\n ex_w = k_h*k_w*C\n if outArray.shape != (ex_h,ex_w):\n print(\"ImageToArray: outArray shape \",outArray.shape, \n \" does not match with expected dims (\",ex_h,',',ex_w,')')\n assert False\n r = 0\n for i in range(N):\n for j in range(0,H-k_h+1,stride):\n for k in range(0,W-k_w+1,stride):\n for l in range(k_h):\n st_col = (l*k_w)*C\n end_col = st_col+k_w*C\n outArray[r,st_col:end_col] = batchVol[i,j+l,k:k+k_w].reshape((1,k_w*C))\n r = r+1\n\nclass colors:\n \"\"\" Simple ANSI escape sequences \"\"\"\n BOLD = '\\033[1m'\n RED = '\\033[91m'\n END = '\\033[0m'\n\ndef plotMNISTResult(data,label,idx):\n '''\n data: numpy array containing MNIST images (,28,28,1)\n label: numpy array containing one hot vectors (,10)\n idx: an array of indexes to plot, the first 16 indexes are used\n '''\n print(np.argmax(label[idx],axis=1))\n _, ax = pt.subplots(1,16,figsize=(10,1))\n for i in range(16):\n ax[i].axis('off')\n ax[i].imshow(data[idx[i]].reshape((28,28)))\n\ndef plotCifar10Images(image,labelOneH,labelNames,idx):\n fig,ax = pt.subplots(2,8,figsize=(8,2))\n fig.tight_layout()\n for i in range(16):\n ax[i//8,i%8].imshow(image[idx+i])\n ax[i//8,i%8].set_title(labelNames[np.argmax(labelOneH[idx+i])])\n ax[i//8,i%8].axis('off')"
] |
[
[
"numpy.argmax",
"matplotlib.pyplot.subplots"
]
] |
ysulsky/krazyworld
|
[
"70eed53462ed94a0d66610e32ef7581553e0bcb5"
] |
[
"legacy/krazy_world_old.py"
] |
[
"from pygame.constants import K_r, K_UP, K_DOWN, K_LEFT, K_RIGHT, K_t, K_a\nimport pygame\nimport numpy as np\nimport sys\nfrom collections import OrderedDict\nimport os\nimport random\nimport copy\nclock = pygame.time.Clock()\n\n\n\n\nEASY_GRID_KWARGS = dict(screen_height=256,\n grid_squares_per_row=5, grid_screens=1,\n num_goals=3, min_goal_dist=1, max_goal_dist=np.inf,\n num_keys=0, num_transporters=0,\n num_steps_before_energy_needed=np.inf, energy_replenish=8, energy_sq_perc=0.00,\n death_sq_perc=0.15, ice_sq_perc=0.15,\n dynamics_type='default', reward_type='sparse',\n render_type='local', headless=False, one_hot_obs=True,\n seed=None, task_seed=69, init_pos_seed=42)\nMEDIUM_GRID_KWARGS = dict(screen_height=256,\n grid_squares_per_row=7, grid_screens=1,\n num_goals=1, min_goal_dist=1, max_goal_dist=np.inf,\n num_keys=0, num_transporters=1,\n num_steps_before_energy_needed=8, energy_replenish=8, energy_sq_perc=0.05,\n death_sq_perc=0.1, ice_sq_perc=0.1,\n dynamics_type='default', reward_type='sparse',\n render_type='local', headless=False, one_hot_obs=True,\n seed=None, task_seed=69, init_pos_seed=42)\nHARD_GRID_KWARGS = dict(screen_height=256,\n grid_squares_per_row=10, grid_screens=1,\n num_goals=3, min_goal_dist=1, max_goal_dist=np.inf,\n num_keys=1, num_transporters=1,\n num_steps_before_energy_needed=12, energy_replenish=8, energy_sq_perc=0.1,\n death_sq_perc=0.15, ice_sq_perc=0.1,\n dynamics_type='simple', reward_type='sparse',\n render_type='local', headless=True, one_hot_obs=True,\n seed=None, task_seed=69, init_pos_seed=42)\n\nfrom moleskin import moleskin as M\n\n\nclass KrazyGridWorld:\n def seed(self, init_pos_seed, task_seed):\n self.init_pos_rng = random.Random(init_pos_seed)\n self.task_rng = np.random.RandomState(task_seed)\n self.task_rng2 = random.Random(task_seed)\n\n def __init__(self, screen_height,\n grid_squares_per_row=10, grid_screens=1,\n num_goals=3, min_goal_dist=1, max_goal_dist=np.inf,\n num_keys=1, num_transporters=1,\n num_steps_before_energy_needed=11, energy_replenish=8, energy_sq_perc=0.00,\n death_sq_perc=0.07, ice_sq_perc=0.1,\n dynamics_type='default', reward_type='sparse',\n render_type='image', headless=False, one_hot_obs=True,\n seed=42, task_seed=None, init_pos_seed=None):\n\n # seed itself is depreciated but I'm keeping it at the moment because\n # removing it would break all the other code that depends on this.\n\n if task_seed is None:\n task_seed = seed\n\n if init_pos_seed is None:\n init_pos_seed = seed\n\n self.seed(init_pos_seed, task_seed)\n\n #seed = task_seed\n\n #random.seed(seed)\n #np.random.seed(seed)\n\n\n\n if headless is True:\n os.putenv('SDL_VIDEODRIVER', 'fbcon')\n os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n\n\n actions = {\n \"up\": K_UP,\n \"left\": K_LEFT,\n \"right\": K_RIGHT,\n \"down\": K_DOWN,\n 'reset': K_r,\n 'reset_board': K_t,\n 'reset_agent_start': K_a\n }\n\n self.actions = actions\n self.one_hot_obs = one_hot_obs\n self.screen_dim = (screen_height, screen_height) # width and height\n\n #PyGameWrapper.__init__(self, screen_height, screen_height, actions=actions)\n\n self.render_type = render_type\n self.colors = OrderedDict([('black', (0, 0, 0)),\n ('white', (255, 255, 255)),\n (\"silver\", (192, 192, 192)),\n ('gold', (255, 223, 0)),\n ('green', (0, 255, 0)),\n ('brown', (165, 42, 42)),\n ('orange', (255, 140, 0)),\n ('magenta', (255, 0, 255)),\n ('purple', (75, 0, 130)),\n ('red', (255, 0, 0))])\n\n self.tile_types = dict(hole=0, normal=1, goal=2, agent=3, transport=6, door=5, key=7, death=9, ice=8, 
energy=4)\n\n self.grid_screens = grid_screens\n self.grid_squares_per_row = grid_squares_per_row\n screen_height = screen_height - 2*0.8*(screen_height//grid_squares_per_row)\n self.grid_square_height = screen_height//grid_squares_per_row\n self.grid_square_margin = int(self.grid_square_height/5)\n self.grid_square_height = int(4*self.grid_square_height/5)\n self.agent_position = None\n self.agent_position_init = None\n self.screen = None\n self.game_grid = None\n self.game_grid_init = None\n self.dynamics = None\n self.num_goals = num_goals\n self.min_goal_dist = min_goal_dist\n self.max_goal_dist = max_goal_dist\n self.num_goals_obtained = 0\n self.goal_squares = None\n self.num_transporters = num_transporters\n self.transporters = None\n self.num_keys = num_keys\n self.has_key = False\n self.dead = False\n self.door_pos = None\n self.energy = num_steps_before_energy_needed\n self.energy_init = num_steps_before_energy_needed\n self.energy_replenish = energy_replenish\n self.death_sq_perc = death_sq_perc\n self.ice_sq_perc = ice_sq_perc\n self.energy_sq_perc = energy_sq_perc\n self.reward_type = reward_type\n self.dynamics_type = dynamics_type\n self.reset_board()\n pygame.init()\n self.screen = pygame.display.set_mode(self.getScreenDims())\n self.clock = pygame.time.Clock()\n self.render()\n\n def getScreenDims(self):\n return self.screen_dim\n\n def reset(self):\n # using the same board and the same agent start pos, reset the game.\n self.agent_position = copy.deepcopy(self.agent_position_init)\n self.game_grid = copy.deepcopy(self.game_grid_init)\n self.dead = False\n self.has_key = False\n self.energy = self.energy_init\n self.num_goals_obtained = 0\n self.render()\n return self.get_obs()\n\n def reset_including_x0(self):\n self.reset_agent_start_position()\n return self.reset()\n\n def reset_board(self):\n # reset the entire board and agent start position, generating a new MDP.\n self.has_key = False\n self.game_grid = np.ones(dtype=np.int32, shape=(self.grid_screens,\n self.grid_squares_per_row,\n self.grid_squares_per_row))\n self.game_grid *= self.tile_types['normal']\n self.reset_agent_start_position()\n if self.num_keys > 0:\n self.reset_key_position()\n self.reset_transporter_squares()\n self.reset_death_squares()\n self.reset_ice_squares()\n self.reset_energy_squares()\n self.reset_goal_squares()\n #self.change_colors()\n self.change_dynamics()\n self.num_goals_obtained = 0\n self.dead = False\n self.energy = self.energy_init\n self.game_grid_init = copy.deepcopy(self.game_grid)\n self.agent_position = copy.deepcopy(self.agent_position_init)\n\n def reset_agent_start_position(self):\n # keep the previous board but update the agents starting position.\n # keeps the previous MDP but samples x_0.\n found = False\n while found is False:\n cord_0 = 0\n cord_1 = self.init_pos_rng.randint(0, self.grid_squares_per_row-1)\n cord_2 = self.init_pos_rng.randint(0, self.grid_squares_per_row-1)\n if self.game_grid[cord_0, cord_2, cord_1] == self.tile_types['normal']:\n found = True\n self.agent_position = [cord_0, cord_1, cord_2]\n self.agent_position_init = copy.deepcopy(self.agent_position)\n #self.reset()\n\n #self.reset()\n\n def reset_key_position(self):\n def get_corner_square():\n g = self.task_rng.randint(1, self.grid_squares_per_row - 2, (2,))\n if g[0] != self.agent_position[1] or g[1] != self.agent_position[2]:\n return g\n return get_corner_square()\n wall_pos = get_corner_square()\n\n def get_door_square(wall_pos, range_x, range_y):\n axis = self.task_rng.randint(0, 1)\n if 
axis == 0:\n door_pos = [self.task_rng.randint(range_x[0], range_x[1]), wall_pos[1]]\n else:\n door_pos = [wall_pos[0], self.task_rng.randint(range_y[0], range_y[1])]\n if door_pos[0] != wall_pos[0] or door_pos[1] != wall_pos[1]:\n return door_pos\n return get_door_square(wall_pos, range_x, range_y)\n\n def get_key_square_and_agent_square(wall_pos):\n above = self.task_rng.randint(0, 1)\n key_sq = [0, 0]\n agent_sq = [0, 0]\n if above == 0:\n key_sq[0] = self.task_rng.randint(0, wall_pos[0])\n key_sq[1] = self.task_rng.randint(0, wall_pos[1])\n agent_sq[1] = self.task_rng.randint(0, wall_pos[0])\n agent_sq[0] = self.task_rng.randint(0, wall_pos[1])\n else:\n key_sq[0] = self.task_rng.randint(wall_pos[0], self.grid_squares_per_row-1)\n key_sq[1] = self.task_rng.randint(wall_pos[1], self.grid_squares_per_row-1)\n agent_sq[1] = self.task_rng.randint(wall_pos[0], self.grid_squares_per_row - 1)\n agent_sq[0] = self.task_rng.randint(wall_pos[1], self.grid_squares_per_row - 1)\n if agent_sq[1] != key_sq[0] or agent_sq[0] != key_sq[1]:\n if self.game_grid[0, agent_sq[1], agent_sq[0]] == self.tile_types['normal'] and self.game_grid[0, key_sq[1], key_sq[0]] == self.tile_types['normal']:\n return key_sq, agent_sq\n return get_key_square_and_agent_square(wall_pos)\n\n if wall_pos[0] > self.grid_squares_per_row // 2:\n range_x = [wall_pos[0], self.grid_squares_per_row - 1]\n for i in range(wall_pos[0], self.grid_squares_per_row):\n self.game_grid[:, i, wall_pos[1]] = self.tile_types['hole']\n else:\n range_x = [0, wall_pos[0]]\n for i in range(0, wall_pos[0]):\n self.game_grid[:, i, wall_pos[1]] = self.tile_types['hole']\n if wall_pos[1] > self.grid_squares_per_row // 2:\n range_y = [wall_pos[1], self.grid_squares_per_row - 1]\n for i in range(wall_pos[1], self.grid_squares_per_row):\n self.game_grid[:, wall_pos[0], i] = self.tile_types['hole']\n else:\n range_y = [0, wall_pos[1]]\n for i in range(0, wall_pos[1]):\n self.game_grid[:, wall_pos[0], i] = self.tile_types['hole']\n door_pos = get_door_square(wall_pos, range_x, range_y)\n self.game_grid[0, door_pos[0], door_pos[1]] = self.tile_types['door']\n\n key_sq, agent_sq = get_key_square_and_agent_square(wall_pos)\n self.agent_position = [0] + agent_sq\n self.agent_position_init = copy.deepcopy(self.agent_position)\n self.game_grid[0, key_sq[0], key_sq[1]] = self.tile_types['key']\n self.door_pos = [0, door_pos[0], door_pos[1]]\n\n def get_one_non_agent_square(self):\n g = self.task_rng.randint(0, self.grid_squares_per_row - 1, (2,))\n if g[0] != self.agent_position[1] or g[1] != self.agent_position[2]:\n return g\n return self.get_one_non_agent_square()\n\n def reset_goal_squares(self):\n gs = []\n self.goal_squares = []\n while len(gs) < self.num_goals:\n g = self.get_one_non_agent_square()\n if self.game_grid[0, g[0], g[1]] == self.tile_types['normal']:\n dist_1 = abs(g[1] - self.agent_position[1])\n dist_2 = abs(g[0] - self.agent_position[2])\n dist = dist_1 + dist_2\n if self.min_goal_dist < dist < self.max_goal_dist:\n gs.append(g)\n for g in gs:\n self.game_grid[0, g[0], g[1]] = self.tile_types['goal']\n self.goal_squares.append([0, g[0], g[1]])\n\n def reset_transporter_squares(self):\n gs = []\n for _ in range(self.num_transporters):\n g_1 = self.get_one_non_agent_square()\n g_2 = self.get_one_non_agent_square()\n if self.game_grid[0, g_1[0], g_1[1]] == self.tile_types['normal']:\n if self.game_grid[0, g_2[0], g_2[1]] == self.tile_types['normal']:\n gs.append([g_1, g_2])\n if len(gs) == self.num_transporters:\n for g in gs:\n for sub_g in 
g:\n self.game_grid[0, sub_g[0], sub_g[1]] = self.tile_types['transport']\n self.transporters = gs\n else:\n self.reset_transporter_squares()\n\n def reset_death_squares(self):\n ds = []\n num_d_squares = int(self.grid_squares_per_row * self.grid_squares_per_row * self.death_sq_perc)\n while len(ds) < num_d_squares:\n d = self.get_one_non_agent_square()\n if self.game_grid[0, d[0], d[1]] == self.tile_types['normal']:\n if self.door_pos is not None:\n dist_1 = abs(d[0] - self.door_pos[1])\n dist_2 = abs(d[1] - self.door_pos[2])\n dist = dist_1 + dist_2\n if dist > 2:\n ds.append(d)\n else:\n ds.append(d)\n for d in ds:\n self.game_grid[0, d[0], d[1]] = self.tile_types['death']\n\n def reset_ice_squares(self):\n ds = []\n num_d_squares = int(self.grid_squares_per_row * self.grid_squares_per_row * self.ice_sq_perc)\n while len(ds) < num_d_squares:\n d = self.get_one_non_agent_square()\n if self.game_grid[0, d[0], d[1]] == self.tile_types['normal']:\n if self.door_pos is not None:\n dist_1 = abs(d[0] - self.door_pos[1])\n dist_2 = abs(d[1] - self.door_pos[2])\n dist = dist_1 + dist_2\n if dist > 2:\n ds.append(d)\n else:\n ds.append(d)\n for d in ds:\n self.game_grid[0, d[0], d[1]] = self.tile_types['ice']\n\n def reset_energy_squares(self):\n ds = []\n num_d_squares = int(self.grid_squares_per_row * self.grid_squares_per_row * self.energy_sq_perc)\n while len(ds) < num_d_squares:\n d = self.get_one_non_agent_square()\n if self.game_grid[0, d[0], d[1]] == self.tile_types['normal']:\n ds.append(d)\n for d in ds:\n self.game_grid[0, d[0], d[1]] = self.tile_types['energy']\n\n def draw_grid(self):\n self.screen.fill(list(self.colors.values())[0])\n\n # Draw the grid\n for row in range(self.grid_squares_per_row):\n for column in range(self.grid_squares_per_row):\n colour_list = list(self.colors.values())\n colour_idx = self.game_grid[0][row][column]\n color = colour_list[colour_idx]\n pygame.draw.rect(self.screen,\n color,\n self.get_grid_square_screen_pos(row, column))\n\n def draw_agent(self):\n colour_list = list(self.colors.values())\n agent_color = (0, 0, 255) # blue\n\n pygame.draw.circle(self.screen,\n agent_color,\n self.get_agent_screen_position(),\n self.grid_square_height//2)\n\n def draw_status(self):\n if self.has_key:\n colour_list = list(self.colors.values())\n color_idx = self.tile_types['key']\n color = colour_list[color_idx]\n pygame.draw.rect(self.screen,\n color,\n self.get_grid_square_screen_pos(self.grid_squares_per_row+1, 2))\n\n if self.energy != np.inf:\n energy_sqs = self.energy // 3\n energy_sqs = int(min(energy_sqs, 5, self.grid_squares_per_row-5))\n for e_step in range(energy_sqs):\n colour_list = list(self.colors.values())\n color_idx = self.tile_types['energy']\n color = colour_list[color_idx]\n pygame.draw.rect(self.screen,\n color,\n self.get_grid_square_screen_pos(self.grid_squares_per_row+1, 5+e_step))\n\n def get_grid_square_screen_pos(self, row, column):\n pos = [\n (self.grid_square_margin + self.grid_square_height) * column + self.grid_square_margin,\n (self.grid_square_margin + self.grid_square_height) * row + self.grid_square_margin,\n self.grid_square_height,\n self.grid_square_height\n ]\n return pos\n\n def get_agent_screen_position(self):\n pos = [\n (self.grid_square_margin + self.grid_square_height) * self.agent_position[1] + self.grid_square_margin + self.grid_square_height // 2,\n (self.grid_square_margin + self.grid_square_height) * self.agent_position[2] + self.grid_square_margin + self.grid_square_height // 2,\n ]\n return pos\n\n def 
_handle_player_events(self):\n # this is bugged with python 3\n # The pygame support is not very good.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n key = event.key\n\n if key == self.actions[\"left\"]:\n self.update_agent_pos('l')\n\n if key == self.actions[\"right\"]:\n self.update_agent_pos('r')\n\n if key == self.actions[\"up\"]:\n self.update_agent_pos('u')\n\n if key == self.actions[\"down\"]:\n self.update_agent_pos('d')\n\n if key == self.actions['reset']:\n self.reset()\n if key == self.actions['reset_board']:\n self.reset_board()\n if key == self.actions['reset_agent_start']:\n self.reset_including_x0()\n\n def update_agent_pos(self, command, recurs_step=0):\n if self.dead is False:\n nu_pos = copy.deepcopy(self.agent_position)\n #if command == 'u':\n # nu_pos[2] = self.agent_position[2] - 1\n #if command == 'd':\n # nu_pos[2] = self.agent_position[2] + 1\n #if command == 'l':\n # nu_pos[1] = self.agent_position[1] - 1\n #if command == 'r':\n # nu_pos[1] = self.agent_position[1] + 1\n\n nu_pos = self.add_lists(nu_pos, self.dynamics[command])\n\n if self.is_nu_pos_legal(nu_pos):\n self.agent_position = nu_pos\n self.check_at_goal()\n self.check_at_key()\n self.check_at_transporter()\n self.check_dead()\n self.check_at_energy()\n self.check_at_ice(command, recurs_step=recurs_step)\n if recurs_step == 0:\n self.energy -= 1\n if self.energy < 1:\n self.dead = True\n\n def is_nu_pos_legal(self, nu_pos):\n if (-1 < nu_pos[1] < self.grid_squares_per_row) and (-1 < nu_pos[2] < self.grid_squares_per_row): # in bounds\n if self.game_grid[nu_pos[0], nu_pos[2], nu_pos[1]] != self.tile_types['hole']: # not a hole\n if self.game_grid[nu_pos[0], nu_pos[2], nu_pos[1]] != self.tile_types['door']:\n return True\n else:\n if self.has_key:\n return True\n return False\n\n def check_at_ice(self, command, recurs_step):\n if recurs_step < 100:\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['ice']:\n if command == 'l' or command == 'r':\n if 0 < self.agent_position[1] < self.grid_squares_per_row - 1:\n self.update_agent_pos(command, recurs_step=recurs_step+1)\n else:\n if 0 < self.agent_position[2] < self.grid_squares_per_row - 1:\n self.update_agent_pos(command, recurs_step=recurs_step+1)\n\n def check_at_energy(self):\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['energy']:\n self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] = self.tile_types['normal']\n self.energy += self.energy_replenish\n\n def check_at_goal(self):\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['goal']:\n self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] = self.tile_types['normal']\n self.num_goals_obtained += 1\n\n def check_at_key(self):\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['key']:\n self.has_key = True\n self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] = self.tile_types['normal']\n\n def check_at_transporter(self):\n transport_sq = None\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['transport']:\n for tr in self.transporters:\n if self.agent_position[2] == tr[0][0] and self.agent_position[1] == 
tr[0][1]:\n transport_sq = tr[1]\n elif self.agent_position[2] == tr[1][0] and self.agent_position[1] == tr[1][1]:\n transport_sq = tr[0]\n if transport_sq is not None:\n self.agent_position = [0, transport_sq[1], transport_sq[0]]\n\n def check_dead(self):\n if self.game_grid[self.agent_position[0], self.agent_position[2], self.agent_position[1]] == self.tile_types['death']:\n self.dead = True\n #if self.energy == 0:\n # self.dead = True\n\n def render(self):\n self.draw_grid()\n self.draw_agent()\n self.draw_status()\n self._handle_player_events()\n self.clock.tick_busy_loop(40000) # this limits FPS.\n pygame.display.flip()\n\n def step_rl(self, action):\n action = self.hash_action(action)\n self.update_agent_pos(command=action)\n return self.get_obs(), self.get_reward(), self.dead, dict()\n\n def step(self, action):\n self.render()\n return self.step_rl(action)\n\n def hash_action(self, an_int):\n if an_int == 0:\n return 'u'\n elif an_int == 1:\n return 'd'\n elif an_int == 2:\n return 'l'\n elif an_int == 3:\n return 'r'\n else:\n raise NotImplementedError\n\n def init(self):\n self.reset_board()\n\n def change_colors(self):\n self.tile_types = dict(zip(list(self.tile_types.keys()), self.task_rng2.sample(list(self.tile_types.values()), len(self.tile_types))))\n return self.tile_types\n #vs = self.tile_types.values()\n #ks = self.tile_types.keys()\n #nu_tile_types = copy.deepcopy(self.tile_types)\n #for\n #items = self.colors.items()\n #random.shuffle(items)\n #self.colors = OrderedDict(items)\n\n def change_dynamics(self):\n def map_randint_to_movement(randint):\n if randint == 0:\n return [0, 0, -1]\n elif randint == 1:\n return [0, 0, 1]\n elif randint == 2:\n return [0, -1, 0]\n elif randint == 3:\n return [0, 1, 0]\n\n if self.dynamics_type == 'simple':\n # every button takes one move in a randomized direction.\n random_act = [i for i in range(4)]\n self.task_rng2.shuffle(random_act)\n random_act = [map_randint_to_movement(ra) for ra in random_act]\n self.dynamics = dict(u=random_act[0], d=random_act[1], l=random_act[2], r=random_act[3])\n elif self.dynamics_type == 'moderate':\n # every button takes between one and two moves in a randomized direction.\n random_act = list(range(4))\n temp = []\n for iter_step in range(2):\n ra = copy.deepcopy(random_act)\n self.task_rng2.shuffle(ra)\n temp.append(copy.deepcopy(ra))\n random_act = temp\n r_act = []\n for rand_actz in random_act:\n r_act.append([map_randint_to_movement(ra) for ra in rand_actz])\n r_act_final = [0 for _ in range(3)]\n r_act_final = [copy.deepcopy(r_act_final) for _ in range(4)]\n for i in range(2):\n for j in range(4):\n for k in range(3):\n r_act_final[j][k] += r_act[i][j][k]\n random_act = r_act_final\n self.dynamics = dict(u=random_act[0], d=random_act[1], l=random_act[2], r=random_act[3])\n elif self.dynamics_type == 'hard':\n # every button press takes between one and three moves in a randomized direction.\n random_act = list(range(4))\n temp = []\n for iter_step in range(3):\n ra = copy.deepcopy(random_act)\n self.task_rng2.shuffle(ra)\n temp.append(copy.deepcopy(ra))\n random_act = temp\n r_act = []\n for rand_actz in random_act:\n r_act.append([map_randint_to_movement(ra) for ra in rand_actz])\n r_act_final = [0 for _ in range(3)]\n r_act_final = [copy.deepcopy(r_act_final) for _ in range(4)]\n for i in range(3):\n for j in range(4):\n for k in range(3):\n r_act_final[j][k] += r_act[i][j][k]\n random_act = r_act_final\n self.dynamics = dict(u=random_act[0], d=random_act[1], l=random_act[2], r=random_act[3])\n else:\n 
self.dynamics = dict(u=[0, 0, -1], d=[0, 0, 1], l=[0, -1, 0], r=[0, 1, 0])\n return self.dynamics\n\n def add_lists(self, *lists):\n list_final = [0 for _ in range(len(lists[0]))]\n for listz in lists:\n for iter_step, l in enumerate(listz):\n list_final[iter_step] += l\n return list_final\n\n def get_reward(self):\n if self.reward_type == 'sparse':\n return 0 + self.num_goals_obtained\n else:\n rew = 0\n for goal in self.goal_squares:\n dist_1 = abs(goal[1] - self.agent_position[1])\n dist_2 = abs(goal[2] - self.agent_position[2])\n rew = rew + dist_1 + dist_2\n rew = -1.0*rew\n return rew\n\n def get_obs(self):\n r_type = self.render_type\n if r_type == 'image':\n return pygame.surfarray.array3d(pygame.display.get_surface()).astype(np.uint8)\n if r_type == 'global':\n obs = self.game_grid.flatten(), np.array(self.agent_position[1:])\n elif r_type == 'local':\n neighbors = []\n v, x, y = self.agent_position\n for _i, _j in [(-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0)]:\n i, j = (_i + x, _j + y)\n if 0 <= i < self.grid_squares_per_row and 0 <= j < self.grid_squares_per_row:\n neighbors.append(self.game_grid[0, j, i])\n else:\n neighbors.append(None)\n obs = np.array(neighbors + [v]), np.array([x, y])\n if self.one_hot_obs:\n x, y = obs[1]\n obs = self.one_hot(obs[0], len(self.tile_types)), \\\n self.one_hot(np.array([x * self.grid_squares_per_row + y]), self.grid_squares_per_row ** 2)\n\n return np.concatenate(list(map(lambda o: o.flatten(), obs)))\n\n @staticmethod\n def one_hot(vec, size):\n flattened = vec.flatten()\n state_len = flattened.shape[0]\n oh = np.zeros((state_len, size))\n for i, s in enumerate(flattened):\n oh[i][s] = 1\n return oh\n\n\ndef run_grid():\n game = KrazyGridWorld(**MEDIUM_GRID_KWARGS)\n game.change_colors()\n game.reset_including_x0()\n\n import random\n import time\n while True:\n game.render()\n for i in range(3):\n time.sleep(0.5)\n rint = random.randint(0, 3)\n game.step(rint)\n game.render()\n\n time.sleep(1.5)\n game.reset_board()\n game.change_colors()\n\nif __name__ == \"__main__\":\n run_grid()"
] |
[
[
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.ones"
]
] |
Adn425/Correlation
|
[
"20a4beee97dabd2622731ae6faa20829d08c34f9"
] |
[
"setup.py"
] |
[
"import plotly.express as px\nimport csv\nimport numpy as np\n\ndef plotFigure(data_path):\n with open(data_path) as csv_file:\n df = csv.DictReader(csv_file)\n fig = px.scatter(df,x=\"Coffee in ml\", y=\"sleep in hours\")\n fig.show()\n\ndef getDataSource(data_path):\n marks_in_percentage = []\n days_present = []\n with open(data_path) as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n marks_in_percentage.append(float(row[\"sleep in hours\"]))\n days_present.append(float(row[\"Coffee in ml\"]))\n\n return {\"x\" : marks_in_percentage, \"y\": days_present}\n\ndef findCorrelation(datasource):\n correlation = np.corrcoef(datasource[\"x\"], datasource[\"y\"])\n print(\"Correlation between coffee in ml vs sleep in hours:\" + correlation)\n\ndef setup():\n data_path = \"finding-correlation-master\\cups of coffee vs hours of sleep.csv\"\n\n datasource = getDataSource(data_path)\n findCorrelation(datasource)\n plotFigure(data_path)\n\nsetup()\n"
] |
[
[
"numpy.corrcoef"
]
] |
billhu0228/SmartRoadBridgePy
|
[
"4a5d34028a2612aef846b580733bf6f488110798"
] |
[
"src/srbpy/model/core.py"
] |
[
"# -*- coding : utf-8-*-\nimport copy\nimport json\nimport os\nimport zipfile\nimport numpy as np\nimport sqlalchemy\nfrom decimal import ROUND_HALF_UP, Decimal\nfrom PyAngle import Angle\nfrom numpy import loadtxt, pi\nfrom sqlalchemy import create_engine, event\nfrom sqlalchemy.orm import sessionmaker\nfrom xml.dom.minidom import Document\nfrom ..alignment.align import Align\nfrom ..server import Base, Column, String, Text, ForeignKey, relationship, Float, FLOAT, DECIMAL\nfrom ezdxf.math import Vec2, Matrix44, Vector\nfrom ..stdlib.supstructures import CIPBoxPoints\n\n\nclass Bridge(Base):\n __tablename__ = \"bridge_tbl\"\n name = Column('name', String(10), primary_key=True)\n _f_ali_name = Column('align_name', String(10), ForeignKey(\"ei_tbl.name\", ondelete='CASCADE', onupdate='CASCADE'))\n _f_title_name = Column('title_name', String(120), nullable=True)\n RelatedAlign = relationship(\"Align\", foreign_keys=[_f_ali_name], cascade='save-update,delete')\n\n def __init__(self, name: str, al: Align):\n self.name = name\n self.RelatedAlign = al\n self.spanlist = []\n self.ciplist = []\n\n def set_title(self, title: str):\n self._f_title_name = title\n\n def serialize(self):\n return json.dumps({\"name\": self.name})\n\n def assign_sup(self, inst_name: str, inst: Base, spans: list, st_pk: float, end_pk: float, steps=0.1):\n \"\"\"\n 指定上部结构.\n\n Args:\n inst_name:\n inst:\n spans:\n st_pk:\n end_pk:\n steps: 现浇梁线段步长\n\n Returns:\n\n \"\"\"\n sup_inst = inst.copy()\n sup_inst.Name = inst_name\n sup_inst.start_pk = st_pk\n sup_inst.end_pk = end_pk\n sup_inst.RelatedAlign = self.RelatedAlign\n sup_inst.RelatedBridge = self\n for ii, sp in enumerate(spans):\n setattr(sup_inst, \"RelatedSpan%i\" % ii, sp)\n # 补充KP\n npts = int((end_pk - st_pk) / steps) + 1\n sideL = ((end_pk - st_pk) - (npts - 3) * steps) * 0.5\n for i in range(npts):\n if i == 0:\n dx = st_pk\n elif i == npts - 1:\n dx = end_pk\n else:\n dx = st_pk + sideL + (i - 1) * steps\n x0, y0 = sup_inst.RelatedAlign.get_coordinate(dx)\n z0 = sup_inst.RelatedAlign.get_elevation(dx)\n KP = CIPBoxPoints(i, float(x0), float(y0), float(z0))\n KP.SetRelatedCIPBox(sup_inst)\n sup_inst.KeyPointsList.append(KP)\n #\n self.ciplist.append(sup_inst)\n pass\n\n\nclass Span(Base):\n # 增加ORM映射 -- Bill 2020/11/18\n __tablename__ = \"span_tbl\"\n name = Column(\"name\", String(17), primary_key=True)\n _fAli_name = Column('align_name', String(10), ForeignKey(\"ei_tbl.name\", ondelete='CASCADE', onupdate='CASCADE'))\n _fBri_name = Column('bridge_name', String(10),\n ForeignKey(\"bridge_tbl.name\", ondelete='CASCADE', onupdate='CASCADE'))\n align = relationship(\"Align\", foreign_keys=[_fAli_name], cascade='save-update,delete')\n bridge = relationship(\"Bridge\", foreign_keys=[_fBri_name], cascade='save-update,delete')\n\n _fStation = Column('Station', DECIMAL(15, 3))\n _fAngle = Column('Angle', DECIMAL(15, 3))\n _f_deck_wl = Column('deck_wl', DECIMAL(15, 3), nullable=True)\n _f_deck_wr = Column('deck_wr', DECIMAL(15, 3), nullable=True)\n _f_back_wl = Column('back_wl', DECIMAL(15, 3), nullable=True)\n _f_back_wr = Column('back_wr', DECIMAL(15, 3), nullable=True)\n _f_front_wl = Column('front_wl', DECIMAL(15, 3), nullable=True)\n _f_front_wr = Column('front_wr', DECIMAL(15, 3), nullable=True)\n _fBeam_type = Column('BeamType', String(1), nullable=True)\n _fPier_type = Column('PierType', String(1), nullable=True)\n _fDeck_type = Column('DeckType', String(2), nullable=True, default=\"CT\")\n _fCut_to = Column(\"cut_to\", String(17), nullable=True)\n _fCut_by = 
Column(\"cut_by\", String(17), nullable=True)\n _fHPL = Column(\"HPL\", DECIMAL(15, 3), nullable=True)\n _fHPR = Column(\"HPR\", DECIMAL(15, 3), nullable=True)\n\n # 增加ORM映射 -- Bill 2020/11/18\n\n def __init__(self, align: Align, bridge: Bridge, station: float, ang_deg: float = 90):\n \"\"\"\n 跨径线对象\n\n Args:\n align (Align): 跨径线对应路线\n bridge (Bridge): 跨径线对应桥梁\n station (float): 跨径线桩号\n ang_deg (float): 斜交角, 正交时为90, 逆时针为正, 默认值为90.\n\n \"\"\"\n self.align = align\n self.bridge = bridge\n self.station = station\n self.angle = Angle.from_degrees(ang_deg).to_rad()\n self.elevation = align.get_elevation(station)\n self.ground_elevation = align.get_ground_elevation(station, 0)\n self.width_left, self.width_right = align.get_width(station, self.angle)\n self.hp_left, self.hp_right = align.get_cross_slope(station, self.angle)\n\n self.pier = None\n self.foundation = None\n self.mj = None\n self.bearings = []\n # 增加ORM映射 -- Bill 2020/11/18\n result = (\"%.3f\" % (float(Decimal(station).quantize(Decimal('0.000'), rounding=ROUND_HALF_UP)))).zfill(9)\n self.name = align.name + \"+\" + result\n self._fStation = station\n self._fAngle = ang_deg\n self._fHPL = self.hp_left\n self._fHPR = self.hp_right\n self._f_deck_wl = self.width_left\n self._f_deck_wr = self.width_right\n # 增加ORM映射 -- Bill 2020/11/18\n self.pier = None # 增加结构指定\n\n def __str__(self):\n return self.name\n\n def __eq__(self, other):\n if isinstance(other, Span):\n if str(self) == str(other):\n return True\n else:\n return False\n\n def __lt__(self, other):\n if isinstance(other, Span):\n return self.station < other.station\n else:\n raise Exception(\"无法与非Span类进行比较.\")\n\n def __add__(self, dist: float):\n return Span(self.align, self.bridge, self.station + dist, self.angle)\n\n def __sub__(self, other) -> float:\n if isinstance(other, Span):\n if other.align == self.align:\n return self.station - other.station\n else:\n print(\"警告:桩号不在同一条设计线\")\n return self.station - other.station\n raise Exception(\"无法与其他类型相减.\")\n\n def serialize(self):\n dict = {\n \"align\": self.align.name,\n \"bridge\": self.bridge.name,\n \"station\": self.station,\n \"angle\": self.angle,\n \"elevation\": self.elevation,\n \"ground_elevation\": self.ground_elevation,\n \"width_left\": self.width_left,\n \"width_right\": self.width_right,\n \"hp_left\": self.hp_left,\n \"hp_right\": self.hp_right,\n }\n return json.dumps(dict)\n\n def assign_substructure(self, inst_name: str, sub_inst: Base):\n \"\"\"\n 向S附加下部结构\n Args:\n inst_name: 主键\n sub_inst: 实例\n\n Returns:\n\n \"\"\"\n self.pier = sub_inst.copy()\n self.pier.Sub_Inst.Name = inst_name\n self.pier.Sub_Inst.RelatedSpan = self\n for ii, col in enumerate(self.pier.ColumnList):\n col.Name = inst_name + \"/COL%s\" % str(ii + 1).zfill(2)\n self.pier.CapBeam_Inst.Name = inst_name + \"/CB01\"\n xyz = self.align.get_coordinate(self.station)\n xyz.append(self.align.get_ground_elevation(self.station, 0))\n span_cc = Vector(xyz)\n uux = Vector(self.align.get_direction(self.station))\n uuy = uux.rotate_deg(90.0)\n uuz = Vector(0, 0, 1)\n trans_matrix = Matrix44.ucs(uux, uuy, uuz, span_cc)\n self.pier.transform(trans_matrix)\n pass\n\n def assign_found2(self, inst_name: str, fund_inst: Base):\n self.foundation = fund_inst.copy()\n self.foundation.Found_Inst.Name = inst_name\n self.foundation.Found_Inst.RelatedSpan = self\n for ii, pile in enumerate(self.foundation.PileList):\n pile.Name = inst_name + \"/PI%s\" % str(ii + 1).zfill(2)\n for ii, pc in enumerate(self.foundation.PileCapList):\n pc.Name = inst_name + \"/PC%s\" 
% str(ii + 1).zfill(2)\n xyz = self.align.get_coordinate(self.station)\n xyz.append(self.align.get_ground_elevation(self.station, 0))\n span_cc = Vector(xyz)\n uux = Vector(self.align.get_direction(self.station))\n uuy = uux.rotate_deg(90.0)\n uuz = Vector(0, 0, 1)\n trans_matrix = Matrix44.ucs(uux, uuy, uuz, span_cc)\n self.foundation.transform(trans_matrix)\n pass\n\n def assign_found(self, inst_name: str, fund_inst: Base,\n off_l: float = 0, off_w: float = 0, off_h: float = 0,\n angle_deg: float = 0):\n \"\"\"\n 指定属于本处分跨线的基础结构实例.\n\n Args:\n inst_name: 基础编号.\n fund_inst: 基础实例.\n off_h: 竖向偏心, 默认值为 0 表示地面线下0.5m.\n off_w: 横桥向偏心, 默认值为 0.\n off_l: 顺桥向偏心, 默认值为 0.\n angle_deg :基础相对于span平面的偏角, 默认值为 0, 逆时针为正.\n Returns:\n\n \"\"\"\n self.foundation = copy.deepcopy(fund_inst)\n self.foundation.Name = inst_name\n self.foundation.align = self.align\n self.foundation.bridge = self.bridge\n self.foundation.RelatedSpan = self\n cc = Vec2(self.align.get_coordinate(self.station))\n l_unit = Vec2(self.align.get_direction(self.station))\n w_unit = l_unit.rotate_deg(90.0)\n delta = off_l * l_unit + off_w * w_unit\n new_cc = cc + delta\n z0 = self.align.get_ground_elevation(self.station, 0) - 0.5 + off_h\n x0 = new_cc.x\n y0 = new_cc.y\n ref_v = Vec2(self.align.get_direction(self.station))\n ref_v = ref_v.rotate_deg(Angle.from_rad(self.angle).to_degrees() + angle_deg - 90.0)\n ang2north = ref_v.angle_between(Vec2([0, 1]))\n self.foundation.AngOfNorth = ang2north # 弧度\n self.foundation.X = x0\n self.foundation.Y = y0\n self.foundation.Z = z0\n\n pass\n\n def assign_bearing(self, inst_name: str, br_inst: Base, offset=None):\n \"\"\"\n\n Args:\n inst_name:\n br_inst:\n offset:\n\n Returns:\n\n \"\"\"\n\n if offset is None:\n offset = [0, 0]\n br_cp = br_inst.copy()\n\n br_cp.Name = inst_name\n br_cp.RelatedSpan = self\n bk_supper = None # 后排上部\n ft_supper = None # 前排上部\n supper = None\n for cip in self.bridge.ciplist:\n for ii, sp in enumerate(cip.span_list()):\n if sp.name == self.name:\n if ii == 0:\n ft_supper = cip\n elif ii == len(cip.span_list()) - 1:\n bk_supper = cip\n else:\n supper = cip\n\n xyz = self.align.get_coordinate(self.station)\n\n xyz.append()\n span_cc = Vector(xyz)\n\n def make_happy(self):\n pass\n # def assign_pier(self, name_inst: str, pier_inst: PierBase):\n # \"\"\"\n # 指定属于本处分跨线的桥墩结构实例.\n #\n # Args:\n # name: 桥墩编号.\n # pier_inst: 桥墩实例.\n #\n # Returns:\n #\n # \"\"\"\n # self.pier = copy.deepcopy(pier_inst)\n # self.pier._fName = name_inst\n # self.pier.align = self.align\n # self.pier.bridge = self.bridge\n # self.pier.span = self\n # self.pier._fStation = self._fStation\n # self.pier._fAngle = self._fAngle\n # self.pier._fSlopeLeft = self._fHPL\n # self.pier._fSlopeRight = self._fHPR\n # pass\n\n\nclass SpanCollection(list):\n def __init__(self, align_dict: dict, bridge_dict: dict):\n super().__init__()\n self.align_dict = align_dict\n self.bridge_dict = bridge_dict\n\n def add(self, s: Span = None, align: Align = None, bridge: Bridge = None, station: float = None,\n ang_deg: float = None) -> Span:\n if s != None:\n res = s\n elif align != None and bridge != None:\n res = Span(align, bridge, station, Angle.from_degrees(ang_deg).to_rad())\n else:\n raise Exception(\"参数不足.\")\n self.append(res)\n self.sort()\n return res\n\n def read_csv(self, csv_path, sep=','):\n data = loadtxt(csv_path, delimiter=sep, dtype=str)\n for line in data:\n self.append(Span(self.align_dict[line[0]],\n self.bridge_dict[line[1]],\n float(line[2]),\n Angle.from_degrees(line[3]).to_rad()\n ))\n 
self.sort()\n\n def __getitem__(self, item) -> Span:\n return super(SpanCollection, self).__getitem__(item)\n\n\nclass Model(object):\n '''\n 基础模型\n '''\n\n def __init__(self):\n self.alignments = {}\n self.bridges = {}\n self.spans = SpanCollection(self.alignments, self.bridges)\n\n def add_align(self, alignment: Align) -> int:\n \"\"\"\n 导入路线数据。\n\n Args:\n alignment: 路线对象\n\n Returns:\n int: 成功时返回 0,失败返回 -1\n \"\"\"\n\n try:\n self.alignments[alignment.name] = alignment\n return 0\n except Exception as e:\n print(e)\n return -1\n\n def add_bridge(self, bri: Bridge) -> int:\n \"\"\"\n 导入桥梁数据。\n\n Args:\n bri (Bridge): 桥梁对象\n\n Returns:\n int : 成功时返回 0,失败返回 -1\n\n \"\"\"\n try:\n self.bridges[bri.name] = bri\n return 0\n except Exception as e:\n print(e)\n return -1\n\n def add_span(self, spa: Span) -> int:\n try:\n self.spans.append(spa)\n self.spans.sort()\n spa.bridge.spanlist.append(spa)\n return 0\n except Exception as e:\n print(e)\n return -1\n\n def _project_xml(self) -> Document:\n \"\"\"\n 生成project.xml\n\n Returns:\n Document : <class 'xml.dom.minidom.Document'>\n\n \"\"\"\n doc = Document()\n pro = doc.createElement('project')\n brs = doc.createElement('bridges')\n als = doc.createElement('alignments')\n\n for key in self.alignments.keys():\n align = self.alignments[key]\n al = doc.createElement('alignment')\n al.setAttribute(\"name\", align.name)\n file = doc.createElement(\"fileLocation\")\n file.appendChild(doc.createTextNode(align._work_dir))\n al.appendChild(file)\n als.appendChild(al)\n for key in self.bridges.keys():\n bri = self.bridges[key]\n br = doc.createElement('bridge')\n br.setAttribute(\"name\", bri.name)\n brs.appendChild(br)\n doc.appendChild(pro)\n pro.appendChild(als)\n pro.appendChild(brs)\n include = doc.createElement(\"include\")\n include.appendChild(doc.createTextNode(\"./spans.xml\"))\n pro.appendChild(include)\n return doc\n\n def _make_span_xml(self) -> Document:\n doc = Document()\n\n return doc\n\n def save_srb(self, filename):\n CLEANTMPS = True\n tmp, ex = os.path.splitext(filename)\n file = tmp + '.srb'\n z = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED)\n\n proj_doc = self._project_xml()\n fpath = os.path.dirname(filename) + \"/project.xml\"\n with open(fpath, 'wb') as f:\n f.write(proj_doc.toprettyxml(indent='\\t', encoding='utf-8'))\n\n z.write(fpath)\n if CLEANTMPS:\n os.remove(fpath)\n\n span_doc = self._make_span_xml()\n fpath = os.path.dirname(filename) + \"/span.xml\"\n with open(fpath, 'wb') as f:\n f.write(span_doc.toprettyxml(indent='\\t', encoding='utf-8'))\n z.write(fpath)\n if CLEANTMPS:\n os.remove(fpath)\n z.close()\n\n def save_sql(self, connect):\n engine = create_engine(connect, echo=False)\n event.listen(engine, \"before_cursor_execute\", add_own_encoders)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n for al in self.alignments.keys():\n session.add(self.alignments[al])\n session.commit()\n for br in self.bridges.keys():\n session.add(self.bridges[br])\n session.commit()\n for sp in self.spans:\n session.add(sp)\n if sp.pier is not None:\n session.add(sp.pier.Sub_Inst) # 添加下部结点\n session.add(sp.pier.CapBeam_Inst) # 添加下部盖梁\n for col in sp.pier.ColumnList: # 添加墩柱\n session.add(col)\n if sp.foundation is not None:\n session.add(sp.foundation.Found_Inst)\n for pc in sp.foundation.PileCapList:\n session.add(pc)\n for pile in sp.foundation.PileList:\n session.add(pile)\n session.commit()\n for br in self.bridges.keys():\n for cip in self.bridges[br].ciplist:\n session.add(cip)\n 
for kp in cip.KeyPointsList:\n session.add(kp)\n session.commit()\n\n\ndef add_own_encoders(conn, cursor, query, *args):\n # try:\n # cursor.connection.encoders[np.float64] = lambda value, encoders: float(value)\n pass\n"
] |
[
[
"numpy.loadtxt"
]
] |
MosHumanoid/bitbots_thmos_meta
|
[
"f45ccc362dc689b69027be5b0d000d2a08580de4"
] |
[
"bitbots_vision/bitbots_vision/src/bitbots_vision/vision_modules/yolo_handler.py"
] |
[
"import cv2\nimport os\nimport abc\nimport rospy\nimport numpy as np\nfrom math import exp\nfrom collections import defaultdict\nfrom .candidate import CandidateFinder, Candidate\ntry:\n from pydarknet import Detector, Image\nexcept ImportError:\n rospy.logerr(\"Not able to run Darknet YOLO! Its only executable under python3 with yolo34py or yolo34py-gpu installed.\", logger_name=\"vision_yolo\")\ntry:\n from openvino.inference_engine import IENetwork, IECore\nexcept ImportError:\n rospy.logerr(\"Not able to run YOLO on the Intel NCS2 TPU! The OpenVINO SDK should be installed if you intend to run YOLO on the TPU\", logger_name=\"vision_yolo\")\ntry:\n ie = IECore()\nexcept NameError:\n rospy.logerr(\"Please install/source OpenVino environment to use the NCS2 YOLO Handler.\", logger_name=\"vision_yolo\")\n\n\nclass YoloHandler:\n \"\"\"\n Defines an abstract YoloHandler, which runs/manages the YOLO inference.\n\n Our YOLO is currently able to detect goalpost and ball candidates.\n \"\"\"\n def __init__(self, config, model_path):\n \"\"\"\n Initialization of the abstract YoloHandler.\n \"\"\"\n self._candidates = None\n self._image = None\n\n # Load possible class names\n namepath = os.path.join(model_path, \"obj.names\")\n with open(namepath, \"r\") as fp:\n self._class_names = fp.read().splitlines()\n\n # Set config\n self.set_config(config)\n\n def set_config(self, config):\n \"\"\"\n Set a new config dict, for parameter adjestments\n\n :param dict: dict with config values\n \"\"\"\n # Set if values should be cached\n self._caching = config['caching']\n self._nms_threshold = config['yolo_nms_threshold']\n self._confidence_threshold = config['yolo_confidence_threshold']\n self._config = config\n\n def set_image(self, img):\n \"\"\"\n Set a image for yolo. This also resets the caches.\n\n :param image: current vision image\n \"\"\"\n # Set image\n self._image = img\n # Reset cached stuff\n self._candidates = None\n\n @abc.abstractmethod\n def predict(self):\n \"\"\"\n Implemented version should run the neural metwork on the latest image. (Cached)\n \"\"\"\n raise NotImplementedError\n\n def get_candidates(self, class_name):\n \"\"\"\n Runs neural network and returns results for all classes. 
(Cached)\n\n :param class_name: The name of the class you want to query\n \"\"\"\n assert class_name in self._class_names, f\"Class '{class_name}' is not available for the current yolo model!\"\n self.predict()\n return self._candidates[class_name]\n\n def get_classes(self):\n return self._class_names\n\n\nclass YoloHandlerDarknet(YoloHandler):\n \"\"\"\n Yolo34py library implementation of our yolo model.\n \"\"\"\n def __init__(self, config, model_path):\n \"\"\"\n Initialization of the YoloHandlerDarknet\n\n :param config: vision config dict\n :param model_path: path to the yolo model\n \"\"\"\n # Define more paths\n weightpath = os.path.join(model_path, \"yolo_weights.weights\")\n configpath = os.path.join(model_path, \"config.cfg\")\n datapath = os.path.join(\"/tmp/obj.data\")\n namepath = os.path.join(model_path, \"obj.names\")\n # Generates a dummy file for the library\n self._generate_dummy_obj_data_file(namepath)\n\n self._config = config\n\n # Setup detector\n self._net = Detector(bytes(configpath, encoding=\"utf-8\"), bytes(weightpath, encoding=\"utf-8\"), 0.5, bytes(datapath, encoding=\"utf-8\"))\n super().__init__(config, model_path)\n\n def _generate_dummy_obj_data_file(self, obj_name_path):\n \"\"\"\n Generates a dummy object data file in which some meta information for the library is stored.\n\n :param obj_name_path: path to the class name file\n \"\"\"\n # Generate file content\n obj_data = \"classes = 2\\nnames = \" + obj_name_path\n # Write file\n with open('/tmp/obj.data', 'w') as f:\n f.write(obj_data)\n\n def predict(self):\n \"\"\"\n Runs the neural network\n \"\"\"\n # Check if cached\n if self._candidates is None or not self._caching:\n # Run neural network\n results = self._net.detect(Image(self._image))\n # Init lists\n self._candidates = defaultdict(list)\n # Go through results\n for out in results:\n # Get class id\n class_id = out[0]\n # Get confidence\n confidence = out[1]\n if confidence > self._confidence_threshold:\n # Get candidate position and size\n x, y, w, h = out[2]\n x = x - int(w // 2)\n y = y - int(h // 2)\n # Create candidate\n c = Candidate(int(x), int(y), int(w), int(h), confidence)\n # Append candidate to the right list depending on the class\n assert class_id.decode() in self._class_names, \\\n f\"Predicted class {class_id.decode()} not in {self._class_names}.\"\n self._candidates[class_id.decode()].append(c)\n\nclass YoloHandlerOpenCV(YoloHandler):\n \"\"\"\n OpenCV library implementation of our yolo model.\n \"\"\"\n def __init__(self, config, model_path):\n \"\"\"\n Initialization of the YoloHandlerOpenCV\n\n :param config: vision config dict\n :param model_path: path to the yolo model\n \"\"\"\n # Build paths\n weightpath = os.path.join(model_path, \"yolo_weights.weights\")\n configpath = os.path.join(model_path, \"config.cfg\")\n # Setup neural network\n self._net = cv2.dnn.readNet(weightpath, configpath)\n # Set default state to all cached values\n self._image = None\n super().__init__(config, model_path)\n\n def _get_output_layers(self):\n \"\"\"\n Returns the names of the network's output layers\n \"\"\"\n layer_names = self._net.getLayerNames()\n\n output_layers = [layer_names[i[0] - 1] for i in self._net.getUnconnectedOutLayers()]\n\n return output_layers\n\n def predict(self):\n \"\"\"\n Runs the neural network\n \"\"\"\n # Check if cached\n if self._candidates is None or not self._caching:\n # Set image\n blob = cv2.dnn.blobFromImage(self._image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n self._net.setInput(blob)\n self._width = self._image.shape[1]\n self._height = self._image.shape[0]\n # Run 
net\n self._outs = self._net.forward(self._get_output_layers())\n # Create lists\n class_ids = []\n confidences = []\n boxes = []\n self._candidates = defaultdict(list)\n # Iterate over output/detections\n for out in self._outs:\n for detection in out:\n # Get score\n scores = detection[5:]\n # Get class\n class_id = np.argmax(scores)\n # Get confidence from score\n confidence = scores[class_id]\n # First threshold to decrease candidate count and increase performance\n if confidence > self._confidence_threshold:\n # Get center point of the candidate\n center_x = int(detection[0] * self._width)\n center_y = int(detection[1] * self._height)\n # Get the height/width\n w = int(detection[2] * self._width)\n h = int(detection[3] * self._height)\n # Calc the upper left point\n x = center_x - w / 2\n y = center_y - h / 2\n # Append result\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n # Merge boxes\n indices = cv2.dnn.NMSBoxes(boxes, confidences, self._confidence_threshold, self._nms_threshold)\n\n # Iterate over filtered boxes\n for i in indices:\n # Get id\n i = i[0]\n # Get box\n box = boxes[i]\n # Convert the box position/size to int\n box = list(map(int, box))\n # Create the candidate\n c = Candidate(*box, confidences[i])\n # Append candidate to the right list depending on the class\n class_id = class_ids[i]\n class_name = self._class_names[class_id]\n self._candidates[class_name].append(c)\n\nclass YoloHandlerNCS2(YoloHandler):\n \"\"\"\n The following code is based on a code example from the Intel documentation under following licensing:\n\n Copyright (C) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Following changes were made:\n - Different class handling\n - Modifications for our framework\n - Different NMS approach\n\n Used parts of the original code:\n - Parts of the communication with the NCS stick\n - Output extraction for the Yolo network output\n \"\"\"\n class _YoloParams:\n \"\"\"\n Class to store params of yolo layers\n \"\"\"\n def __init__(self, param, side):\n self.num = 3 if 'num' not in param else int(param['num'])\n self.coords = 4 if 'coords' not in param else int(param['coords'])\n self.classes = 2 if 'classes' not in param else int(param['classes'])\n self.anchors = [10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0, 156.0,\n 198.0,\n 373.0, 326.0] if 'anchors' not in param else [float(a) for a in param['anchors'].split(',')]\n\n if 'mask' in param:\n mask = [int(idx) for idx in param['mask'].split(',')]\n self.num = len(mask)\n\n maskedAnchors = []\n for idx in mask:\n maskedAnchors += [self.anchors[idx * 2], self.anchors[idx * 2 + 1]]\n self.anchors = maskedAnchors\n\n self.side = side\n self.isYoloV3 = 'mask' in param # Weak way to determine but the only one.\n\n\n def __init__(self, config, model_path):\n # Init parent constructor\n super().__init__(config, model_path)\n\n # Create model file paths\n model_xml = os.path.join(model_path, \"yolo.xml\")\n 
model_bin = os.path.join(model_path, \"yolo.bin\")\n\n # Plugin initialization\n rospy.logdebug(\"Creating Inference Engine...\", logger_name=\"vision_yolo\")\n\n # Reading the IR generated by the Model Optimizer (.xml and .bin files)\n rospy.logdebug(f\"Loading network files:\\n\\t{model_xml}\\n\\t{model_bin}\")\n self._net = IENetwork(model=model_xml, weights=model_bin)\n\n assert len(self._net.inputs.keys()) == 1, \"Sample supports only YOLO V3 based single input topologies\"\n\n # Preparing network inputs\n rospy.logdebug(\"Preparing inputs\")\n self._input_blob = next(iter(self._net.inputs))\n\n # Default batch_size is 1\n self._net.batch_size = 1\n\n # Read and pre-process input images\n self._n, self._c, self._h, self._w = self._net.inputs[self._input_blob].shape\n\n # Device type\n device = \"MYRIAD\"\n\n # Loading model to the plugin\n rospy.logdebug(\"Loading model to the plugin\", logger_name=\"vision_yolo\")\n self._exec_net = ie.load_network(network=self._net, num_requests=2, device_name=device)\n\n def _entry_index(self, side, coord, classes, location, entry):\n \"\"\"\n Calculates the index of a yolo object.\n \"\"\"\n side_power_2 = side ** 2\n n = location // side_power_2\n loc = location % side_power_2\n return int(side_power_2 * (n * (coord + classes + 1) + entry) + loc)\n\n def _parse_yolo_region(self, blob, resized_image_shape, original_im_shape, params, threshold):\n \"\"\"\n Parses bounding boxes out of a yolo output layer.\n\n :param blob: Yolo layer output blob\n :param resized_image_shape: Yolo input image shape\n :param original_im_shape: Vision image shape\n :param params: Layer parameters\n :param threshold: Yolo bounding box threshold\n :return: List of bounding boxes\n \"\"\"\n # Validating output parameters\n _, _, out_blob_h, out_blob_w = blob.shape\n assert out_blob_w == out_blob_h, \\\n f\"Invalid size of output blob. It should be in NCHW layout and height should be equal to width. 
Current height: '{out_blob_h}', current width = '{out_blob_w}'\"\n\n # Extracting layer parameters\n original_image_height, original_image_width = original_im_shape\n resized_image_h, resized_image_w = resized_image_shape\n objects = list()\n predictions = blob.flatten()\n side_square = params.side ** 2\n\n # Parsing YOLO Region output\n for i in range(side_square):\n row = i // params.side\n col = i % params.side\n for n in range(params.num):\n obj_index = self._entry_index(params.side, params.coords, params.classes, n * side_square + i, params.coords)\n scale = predictions[obj_index]\n # Skip unrealistic boxes\n if scale < threshold:\n continue\n box_index = self._entry_index(params.side, params.coords, params.classes, n * side_square + i, 0)\n # Network produces location predictions in absolute coordinates of feature maps.\n # Scale it to relative coordinates.\n x = (col + predictions[box_index + 0 * side_square]) / params.side\n y = (row + predictions[box_index + 1 * side_square]) / params.side\n # Value for exp might be a very large number, so the following construction is used here\n try:\n w_exp = exp(predictions[box_index + 2 * side_square])\n h_exp = exp(predictions[box_index + 3 * side_square])\n except OverflowError:\n continue\n # Depending on topology we need to normalize sizes by feature maps (up to YOLOv3) or by input shape (YOLOv3)\n w = w_exp * params.anchors[2 * n] / (resized_image_w if params.isYoloV3 else params.side)\n h = h_exp * params.anchors[2 * n + 1] / (resized_image_h if params.isYoloV3 else params.side)\n # Iterate over classes\n for j in range(params.classes):\n class_index = self._entry_index(params.side, params.coords, params.classes, n * side_square + i,\n params.coords + 1 + j)\n confidence = scale * predictions[class_index]\n # Skip box if confidence in class is too low\n if confidence < threshold:\n continue\n h = int(h * original_image_height)\n w = int(w * original_image_width)\n x = x * original_image_width - w / 2\n y = y * original_image_height - h / 2\n list_of_coordinates = [int(x), int(y), int(w), int(h)]\n # Convert to int\n objects.append([list_of_coordinates, float(confidence), j])\n return objects\n\n def predict(self):\n if self._candidates is None or not self._caching:\n # Set up variables\n self._candidates = defaultdict(list)\n\n rospy.logdebug(\"Starting inference...\", logger_name=\"vision_yolo\")\n\n # Set request id for the stick. Since we only make one call at a time, we use a static parameter.\n request_id = 1\n # Resize image to yolo input size\n in_frame = cv2.resize(self._image, (self._w, self._h))\n\n # resize input_frame to network size\n in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n in_frame = in_frame.reshape((self._n, self._c, self._h, self._w))\n\n # Start inference\n self._exec_net.start_async(request_id=request_id, inputs={self._input_blob: in_frame})\n\n # Collecting object detection results\n detections = list()\n # Create barrier. 
This lets all following processing steps wait until the prediction is calculated.\n if self._exec_net.requests[request_id].wait(-1) == 0:\n # Get output\n output = self._exec_net.requests[request_id].output_blobs\n # Iterate over output layers\n for layer_name, out_blob in output.items():\n buff = out_blob.buffer\n # Reshape output layer\n out_blob = buff.reshape(self._net.layers[self._net.layers[layer_name].parents[0]].out_data[0].shape)\n # Create layer params object\n layer_params = self._YoloParams(self._net.layers[layer_name].params, out_blob.shape[2])\n # Parse yolo bounding boxes out of output blob\n detections.extend(\n self._parse_yolo_region(\n out_blob,\n in_frame.shape[2:],\n self._image.shape[:-1],\n layer_params,\n self._confidence_threshold))\n\n if detections:\n # Transpose detections\n boxes, confidences, class_ids = list(map(list, zip(*detections)))\n # Non-maximum Suppression. This effectively chooses one bounding box if multiple are lying over each other\n box_indices = cv2.dnn.NMSBoxes(boxes, confidences, self._confidence_threshold, self._nms_threshold)\n # Iterate over filtered boxes\n for index in box_indices:\n # Get id\n index = index[0]\n # Get box\n box = boxes[index]\n # Convert the box position/size to int\n box = list(map(int, box))\n # Create the candidate\n c = Candidate(*box, confidences[index])\n # Append candidate to the right list depending on the class\n class_id = class_ids[index]\n class_name = self._class_names[class_id]\n self._candidates[class_name].append(c)\n\n\nclass YoloDetector(CandidateFinder):\n \"\"\"\n An abstract object detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes.\n \"\"\"\n def __init__(self, config, yolo):\n \"\"\"\n Constructor for the YoloDetector.\n\n :param config: The vision config\n :param yolo: A YoloHandler implementation that runs the yolo network\n \"\"\"\n self._config = config\n self._yolo = yolo\n\n def set_image(self, image):\n \"\"\"\n Set an image for yolo. 
This is cached.\n\n :param image: current vision image\n \"\"\"\n self._yolo.set_image(image)\n\n @abc.abstractmethod\n def get_candidates(self):\n \"\"\"\n :return: all found candidates\n \"\"\"\n raise NotImplementedError\n\n def compute(self):\n \"\"\"\n Runs the yolo network\n \"\"\"\n self._yolo.predict()\n\nclass YoloBallDetector(YoloDetector):\n \"\"\"\n A ball detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the ball class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found ball candidates\n \"\"\"\n return self._yolo.get_candidates(\"ball\")\n\nclass YoloGoalpostDetector(YoloDetector):\n \"\"\"\n A goalpost detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the goalpost class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found goalpost candidates\n \"\"\"\n return self._yolo.get_candidates(\"goalpost\")\n\n\nclass YoloRobotDetector(YoloDetector):\n \"\"\"\n A robot detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the robot class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found robot candidates\n \"\"\"\n return self._yolo.get_candidates(\"robot\")\n\nclass YoloXIntersectionDetector(YoloDetector):\n \"\"\"\n An X-Intersection detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the X-Intersection class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found X-Intersection candidates\n \"\"\"\n return self._yolo.get_candidates(\"X-Intersection\")\n\n\nclass YoloLIntersectionDetector(YoloDetector):\n \"\"\"\n An L-Intersection detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the L-Intersection class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found L-Intersection candidates\n \"\"\"\n return self._yolo.get_candidates(\"L-Intersection\")\n\n\nclass YoloTIntersectionDetector(YoloDetector):\n \"\"\"\n A T-Intersection detector using the yolo neural network.\n This layer connects a single YOLO network with multiple candidate finders for the different classes,\n in this case the T-Intersection class.\n \"\"\"\n def __init__(self, config, yolo):\n super().__init__(config, yolo)\n\n def get_candidates(self):\n \"\"\"\n :return: all found T-Intersection candidates\n \"\"\"\n return self._yolo.get_candidates(\"T-Intersection\")\n"
] |
[
[
"numpy.argmax"
]
] |
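The only call indexed for the row above is `numpy.argmax`, which `YoloHandlerOpenCV.predict` uses to pick the best class per detection. A minimal sketch of that pattern, with made-up numbers standing in for a real network output:

```python
import numpy as np

# A fake YOLO detection vector: x, y, w, h, objectness, then per-class scores.
detection = np.array([0.5, 0.5, 0.1, 0.2, 0.9, 0.05, 0.80])
scores = detection[5:]             # per-class scores
class_id = int(np.argmax(scores))  # index of the highest-scoring class
confidence = scores[class_id]
print(class_id, confidence)        # 1 0.8
```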
mehtamohit013/ITU-Challenge
|
[
"2a60de6c9730f85c797c6c2d0eac1627932d41ac"
] |
[
"model_cur.py"
] |
[
"import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport math\nfrom torch.nn import init\n\n\nclass NoisyLinear(nn.Module):\n \"\"\"Factorised Gaussian NoisyNet\"\"\"\n\n def __init__(self, in_features, out_features, sigma0=0.5):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n self.bias = nn.Parameter(torch.Tensor(out_features))\n self.noisy_weight = nn.Parameter(\n torch.Tensor(out_features, in_features))\n self.noisy_bias = nn.Parameter(torch.Tensor(out_features))\n self.noise_std = sigma0 / math.sqrt(self.in_features)\n\n self.reset_parameters()\n self.register_noise()\n\n def register_noise(self):\n in_noise = torch.FloatTensor(self.in_features)\n out_noise = torch.FloatTensor(self.out_features)\n noise = torch.FloatTensor(self.out_features, self.in_features)\n self.register_buffer('in_noise', in_noise)\n self.register_buffer('out_noise', out_noise)\n self.register_buffer('noise', noise)\n\n def sample_noise(self):\n self.in_noise.normal_(0, self.noise_std)\n self.out_noise.normal_(0, self.noise_std)\n self.noise = torch.mm(\n self.out_noise.view(-1, 1), self.in_noise.view(1, -1))\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n self.noisy_weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n self.noisy_bias.data.uniform_(-stdv, stdv)\n\n def forward(self, x):\n \"\"\"\n Note: noise will be updated if x is not volatile\n \"\"\"\n normal_y = nn.functional.linear(x, self.weight, self.bias)\n if self.training:\n # update the noise once per update\n self.sample_noise()\n\n noisy_weight = self.noisy_weight * self.noise\n noisy_bias = self.noisy_bias * self.out_noise\n noisy_y = nn.functional.linear(x, noisy_weight, noisy_bias)\n return noisy_y + normal_y\n\n def __repr__(self):\n return self.__class__.__name__ + '(' \\\n + 'in_features=' + str(self.in_features) \\\n + ', out_features=' + str(self.out_features) + ')'\n\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\nclass CnnActorCriticNetwork(nn.Module):\n def __init__(self, input_size, output_size, use_noisy_net=False):\n super(CnnActorCriticNetwork, self).__init__()\n\n if use_noisy_net:\n print('use NoisyNet')\n linear = NoisyLinear\n else:\n linear = nn.Linear\n\n self.feature = nn.Sequential(\n nn.Conv2d(\n in_channels=4,\n out_channels=32,\n kernel_size=8,\n stride=4),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1),\n nn.LeakyReLU(),\n Flatten(),\n linear(\n 7 * 7 * 64,\n 512),\n nn.LeakyReLU()\n )\n\n self.actor = nn.Sequential(\n linear(512, 512),\n nn.LeakyReLU(),\n linear(512, output_size)\n )\n\n self.critic = nn.Sequential(\n linear(512, 512),\n nn.LeakyReLU(),\n linear(512, 1)\n )\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.orthogonal_(p.weight, np.sqrt(2))\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.orthogonal_(p.weight, np.sqrt(2))\n p.bias.data.zero_()\n\n for i in range(len(self.actor)):\n if type(self.actor[i]) == nn.Linear:\n init.orthogonal_(self.actor[i].weight, 0.01)\n self.actor[i].bias.data.zero_()\n\n for i in range(len(self.critic)):\n if type(self.critic[i]) == nn.Linear:\n 
init.orthogonal_(self.critic[i].weight, 0.01)\n self.critic[i].bias.data.zero_()\n\n def forward(self, state):\n x = self.feature(state)\n policy = self.actor(x)\n value = self.critic(x)\n return policy, value\n\n\nclass ICMModel(nn.Module):\n def __init__(self, input_size, output_size, use_cuda=True):\n super(ICMModel, self).__init__()\n\n self.input_size = input_size\n self.output_size = output_size\n self.device = torch.device('cuda' if use_cuda else 'cpu')\n\n feature_output = 7 * 7 * 64\n self.feature = nn.Sequential(\n nn.Conv2d(\n in_channels=4,\n out_channels=32,\n kernel_size=8,\n stride=4),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1),\n nn.LeakyReLU(),\n Flatten(),\n nn.Linear(feature_output, 512)\n )\n\n self.inverse_net = nn.Sequential(\n nn.Linear(512 * 2, 512),\n nn.ReLU(),\n nn.Linear(512, output_size)\n )\n\n # Use nn.ModuleList with independent blocks: a plain [module] * 8 list\n # would reuse one block's weights eight times and hide the parameters\n # from .parameters() and .to().\n self.residual = nn.ModuleList([nn.Sequential(\n nn.Linear(output_size + 512, 512),\n nn.LeakyReLU(),\n nn.Linear(512, 512),\n ).to(self.device) for _ in range(8)])\n\n self.forward_net_1 = nn.Sequential(\n nn.Linear(output_size + 512, 512),\n nn.LeakyReLU()\n )\n self.forward_net_2 = nn.Sequential(\n nn.Linear(output_size + 512, 512),\n )\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.kaiming_uniform_(p.weight)\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.kaiming_uniform_(p.weight, a=1.0)\n p.bias.data.zero_()\n\n def forward(self, inputs):\n state, next_state, action = inputs\n\n encode_state = self.feature(state)\n encode_next_state = self.feature(next_state)\n # get pred action\n pred_action = torch.cat((encode_state, encode_next_state), 1)\n pred_action = self.inverse_net(pred_action)\n # ---------------------\n\n # get pred next state\n pred_next_state_feature_orig = torch.cat((encode_state, action), 1)\n pred_next_state_feature_orig = self.forward_net_1(pred_next_state_feature_orig)\n\n # residual\n for i in range(4):\n pred_next_state_feature = self.residual[i * 2](torch.cat((pred_next_state_feature_orig, action), 1))\n pred_next_state_feature_orig = self.residual[i * 2 + 1](\n torch.cat((pred_next_state_feature, action), 1)) + pred_next_state_feature_orig\n\n pred_next_state_feature = self.forward_net_2(torch.cat((pred_next_state_feature_orig, action), 1))\n\n real_next_state_feature = encode_next_state\n return real_next_state_feature, pred_next_state_feature, pred_action\n\nclass LinearActorCritic(nn.Module):\n def __init__(self, inputs:list, outputs:int=64*3):\n super().__init__()\n self.device = torch.device('cpu')\n\n self.flatten = nn.Flatten(start_dim=1,end_dim=-1)\n self.inp_size = np.prod(inputs)\n\n self.affine = nn.Sequential(\n self.create_linear(self.inp_size,16),\n self.create_linear(16, 32),\n self.create_linear(32,64),\n self.create_linear(64,256),\n self.create_linear(256,512)\n )\n \n self.actor_linear = nn.Sequential(\n self.create_linear(512,outputs)\n )\n\n self.critic_linear = nn.Sequential(\n self.create_linear(512,1)\n )\n\n\n \n def create_linear(self,inp:int,out:int, act='relu')-> nn.Module:\n if act == 'relu':\n return nn.Sequential(\n nn.Linear(inp,out),\n nn.ReLU()\n # nn.BatchNorm1d(out)\n )\n elif act == 'lrelu':\n return nn.Sequential(\n nn.Linear(inp,out),\n nn.LeakyReLU()\n # nn.BatchNorm1d(out)\n )\n \n def forward(self, x):\n x = x.to(self.device)\n\n x = self.flatten(x)\n x = self.affine(x)\n\n # Actor - A2C implementation\n x_action = self.actor_linear(x)\n x_action = 
nn.Softmax(dim=-1)(x_action)\n\n # Critic\n x_critic = self.critic_linear(x) \n\n return x_action,x_critic\n\nclass LinearICM(nn.Module):\n def __init__(self,input_size:list,output_size:int,\n use_cuda:bool=False):\n\n super().__init__()\n self.input_size = input_size\n self.inp = np.prod(self.input_size)\n self.output_size = output_size\n self.device = torch.device('cuda' if use_cuda else 'cpu')\n\n self.flatten = nn.Flatten(1,-1)\n\n self.feature = nn.Sequential(\n self.create_linear(self.inp,32, 'lrelu'),\n self.create_linear(32,64, 'lrelu'),\n self.create_linear(64,128, 'lrelu'),\n self.create_linear(128,256, 'lrelu')\n )\n\n self.inverse_net = nn.Sequential(\n self.create_linear(256*2,256),\n nn.Linear(256,output_size)\n )\n\n self.forward_1 = self.create_linear(output_size+256,256)\n self.forward_2 = nn.Linear(output_size+256,256)\n\n # Same fix as in ICMModel: an nn.ModuleList of independent blocks instead\n # of a plain [module] * 8 list, which would share one block's weights.\n self.residual = nn.ModuleList([nn.Sequential(\n self.create_linear(output_size+256,256, 'lrelu'),\n nn.Linear(256, 256),\n ).to(self.device) for _ in range(8)])\n\n def forward(self,inputs):\n state, next_state, action = inputs\n\n state = self.flatten(state)\n next_state = self.flatten(next_state)\n\n encode_state = self.feature(state)\n encode_next_state = self.feature(next_state)\n # get pred action\n pred_action = torch.cat((encode_state, encode_next_state), 1)\n pred_action = self.inverse_net(pred_action)\n # ---------------------\n\n # get pred next state\n pred_next_state_feature_orig = torch.cat((encode_state, action), 1)\n pred_next_state_feature_orig = self.forward_1(pred_next_state_feature_orig)\n\n # residual\n for i in range(4):\n pred_next_state_feature = self.residual[i * 2](torch.cat((pred_next_state_feature_orig, action), 1))\n pred_next_state_feature_orig = self.residual[i * 2 + 1](\n torch.cat((pred_next_state_feature, action), 1)) + pred_next_state_feature_orig\n\n pred_next_state_feature = self.forward_2(torch.cat((pred_next_state_feature_orig, action), 1))\n\n real_next_state_feature = encode_next_state\n return real_next_state_feature, pred_next_state_feature, pred_action\n\n\n\n def create_linear(self,inp:int,out:int, act='relu')-> nn.Module:\n if act == 'relu':\n return nn.Sequential(\n nn.Linear(inp,out),\n nn.ReLU()\n # nn.BatchNorm1d(out)\n )\n elif act == 'lrelu':\n return nn.Sequential(\n nn.Linear(inp,out),\n nn.LeakyReLU()\n # nn.BatchNorm1d(out)\n )\n"
] |
[
[
"torch.nn.Softmax",
"numpy.sqrt",
"torch.Tensor",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Flatten",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.init.orthogonal_",
"torch.device",
"torch.nn.ReLU",
"torch.nn.functional.linear"
]
] |
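The row above originally built its residual blocks with `[nn.Sequential(...)] * 8`, which the code cell now replaces with `nn.ModuleList`. A minimal standalone sketch of the pitfall: multiplying a list of modules copies references rather than modules, and a plain Python list also hides parameters from the parent `nn.Module`:

```python
import torch.nn as nn

shared = [nn.Linear(4, 4)] * 2   # two references to ONE module
assert shared[0] is shared[1]    # the weights would be shared

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # Independent blocks, registered so .parameters()/.to() see them.
        self.blocks = nn.ModuleList(nn.Linear(4, 4) for _ in range(2))

net = Net()
assert net.blocks[0] is not net.blocks[1]
assert len(list(net.parameters())) == 4  # 2 weights + 2 biases, all visible
```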
gasparka/oniapy
|
[
"a58f68244c730041a034b91fd417bd3968d46cc7"
] |
[
"oniapy/hello2.py"
] |
[
"''' Present an interactive function explorer with slider widgets.\n\nScrub the sliders to change the properties of the ``sin`` curve, or\ntype into the title text box to update the title of the plot.\n\nUse the ``bokeh serve`` command to run the example by executing:\n\n bokeh serve sliders.py\n\nat your command prompt. Then navigate to the URL\n\n http://localhost:5006/sliders\n\nin your browser.\n\n'''\nimport numpy as np\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import Slider, TextInput\nfrom bokeh.plotting import figure\n\n# Set up data\nN = 200\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\n\n# Set up plot\nplot = figure(plot_height=400, plot_width=400, title=\"my sine wave\",\n tools=\"crosshair,pan,reset,save,wheel_zoom\",\n x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\n\n# Set up widgets\ntext = TextInput(title=\"title\", value='my sine wave')\noffset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1)\namplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0)\nphase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi)\nfreq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1)\n\n\n# Set up callbacks\ndef update_title(attrname, old, new):\n plot.title.text = text.value\n\ntext.on_change('value', update_title)\n\nclass Update:\n def __init__(self):\n self.c = 0\n\n def __call__(self, *args, **kwargs):\n plot.y_range.start = 0 + self.c\n plot.y_range.end = 100 + self.c\n self.c -= .1\n\ndef update_data(attrname, old, new):\n\n # Get the current slider values\n a = amplitude.value\n b = offset.value\n w = phase.value\n k = freq.value\n\n # Generate the new curve\n x = np.linspace(0, 4*np.pi, N)\n y = a*np.sin(k*x + w) + b\n\n source.data = dict(x=x, y=y)\n\nfor w in [offset, amplitude, phase, freq]:\n w.on_change('value', update_data)\n\n# create a callback that will add a number in a random location\ndef callback():\n p.rect(x=1, y=p.y_range.start-20, width=17, height=50, alpha=0.5, color=\"red\", width_units=\"screen\",\n height_units=\"screen\")\n\n# add a button widget and configure with the call back\nbutton = Button(label=\"The KEY\")\nbutton.on_click(callback)\n\n\n# Set up layouts and add to document\ninputs = widgetbox(text, offset, amplitude, phase, freq)\n\n\n\ncurdoc().add_periodic_callback(Update(), 10)\ncurdoc().add_root(row(inputs, plot, width=800))\ncurdoc().title = \"Sliders\"\n"
] |
[
[
"numpy.linspace",
"numpy.sin"
]
] |
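The two NumPy calls listed for the Bokeh row above are the ones that generate the sine-curve data. A minimal reproduction of just that data step, without the Bokeh plumbing:

```python
import numpy as np

N = 200
x = np.linspace(0, 4 * np.pi, N)  # N evenly spaced points over two periods
y = np.sin(x)                     # the curve the sliders later reshape
print(x[0], round(float(x[-1]), 3), y.min(), y.max())
```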
aravindrajamani/final-year-project
|
[
"e9159b4e9fe7eed11a7a245044106f574518bab1"
] |
[
"Facemask_trainer.py"
] |
[
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications import MobileNetV2\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\nINIT_LR = 1e-4\nEPOCHS = 20\nBS = 32\n\nDIRECTORY = r\"E:\\Final_year_project\\dataset\"\nCATEGORIES = [\"with_mask\", \"without_mask\"]\n\nprint(\"[INFO] loading images...\")\n\ndata = []\nlabels = []\n\nfor category in CATEGORIES:\n path = os.path.join(DIRECTORY, category)\n for img in os.listdir(path):\n \timg_path = os.path.join(path, img)\n \timage = load_img(img_path, target_size=(224, 224))\n \timage = img_to_array(image)\n \timage = preprocess_input(image)\n\n \tdata.append(image)\n \tlabels.append(category)\n\nlb = LabelBinarizer()\nlabels = lb.fit_transform(labels)\nlabels = to_categorical(labels)\n\ndata = np.array(data, dtype=\"float32\")\nlabels = np.array(labels)\n\n(trainX, testX, trainY, testY) = train_test_split(data, labels,\n\ttest_size=0.20, stratify=labels, random_state=42)\n\n\naug = ImageDataGenerator(\n\trotation_range=20,\n\tzoom_range=0.15,\n\twidth_shift_range=0.2,\n\theight_shift_range=0.2,\n\tshear_range=0.15,\n\thorizontal_flip=True,\n\tfill_mode=\"nearest\")\n\nbaseModel = MobileNetV2(weights=\"imagenet\", include_top=False,\n\tinput_tensor=Input(shape=(224, 224, 3)))\n\n\n\nheadModel = baseModel.output\nheadModel = AveragePooling2D(pool_size=(7, 7))(headModel)\nheadModel = Flatten(name=\"flatten\")(headModel)\nheadModel = Dense(128, activation=\"relu\")(headModel)\nheadModel = Dropout(0.5)(headModel)\nheadModel = Dense(2, activation=\"softmax\")(headModel)\n\nmodel = Model(inputs=baseModel.input, outputs=headModel)\n\nfor layer in baseModel.layers:\n\tlayer.trainable = False\n\nprint(\"[INFO] compiling model...\")\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n\tmetrics=[\"accuracy\"])\n\nprint(\"[INFO] training head...\")\nH = model.fit(\n\taug.flow(trainX, trainY, batch_size=BS),\n\tsteps_per_epoch=len(trainX) // BS,\n\tvalidation_data=(testX, testY),\n\tvalidation_steps=len(testX) // BS,\n\tepochs=EPOCHS)\n\nprint(\"[INFO] evaluating network...\")\npredIdxs = model.predict(testX, batch_size=BS)\n\npredIdxs = np.argmax(predIdxs, axis=1)\n\nprint(classification_report(testY.argmax(axis=1), predIdxs,\n\ttarget_names=lb.classes_))\nprint(\"[INFO] saving mask detector model...\")\nmodel.save(\"mask_detector.model\", save_format=\"h5\")\n\nN = EPOCHS\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, N), H.history[\"val_accuracy\"], 
label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend(loc=\"lower left\")\nplt.savefig(\"graph.png\")\n"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.arange",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"numpy.argmax",
"sklearn.preprocessing.LabelBinarizer",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.style.use",
"tensorflow.keras.preprocessing.image.img_to_array",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.AveragePooling2D",
"matplotlib.pyplot.title",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Input"
]
] |
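One subtle step in the trainer above is the label pipeline: with exactly two classes, `LabelBinarizer` yields a single 0/1 column, which `to_categorical` then widens to two one-hot columns. A dependency-light sketch of the same transformation, with `np.eye` standing in for `tensorflow.keras.utils.to_categorical` to avoid the TensorFlow import:

```python
import numpy as np
from sklearn.preprocessing import LabelBinarizer

labels = ["with_mask", "without_mask", "with_mask"]
lb = LabelBinarizer()
binary = lb.fit_transform(labels)    # shape (3, 1): one 0/1 column
one_hot = np.eye(2)[binary.ravel()]  # shape (3, 2), like to_categorical
print(binary.ravel().tolist(), one_hot.tolist())
```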
phy-q/benchmark
|
[
"0a0fd31614039f3620369582017d9bd0a1c88b11"
] |
[
"sciencebirdsagents/HeuristicAgents/PigShooter.py"
] |
[
"import random\n\nimport numpy as np\n\nfrom SBAgent import SBAgent\nfrom SBEnvironment.SBEnvironmentWrapper import SBEnvironmentWrapper\nfrom StateReader.SymbolicStateDevReader import SymbolicStateDevReader\nfrom StateReader.game_object import GameObjectType\nfrom Utils.point2D import Point2D\nfrom Utils.trajectory_planner import SimpleTrajectoryPlanner\n\n\nclass PigShooter(SBAgent):\n def __init__(self, env: SBEnvironmentWrapper, level_selection_function, id: int = 28888, level_list: list = []):\n SBAgent.__init__(self, level_list=level_list, env=env, id=id)\n # initialise a record of the levels to the agent\n\n self.id = id\n self.tp = SimpleTrajectoryPlanner()\n self.model = np.loadtxt(\"Utils/model\", delimiter=\",\")\n self.target_class = list(map(lambda x: x.replace(\"\\n\", \"\"), open('Utils/target_class').readlines()))\n self.env = env # used to sample random action\n self.level_selection_function = level_selection_function\n self.state_representation_type = 'symbolic'\n\n def select_level(self):\n # you can choose to implement this by yourself, or just get it from the LevelSelectionSchema\n idx = self.level_selection_function(self.total_score_record)\n return idx\n\n def select_action(self, state, mode=None):\n symbolic_state_reader = SymbolicStateDevReader(state, self.model, self.target_class)\n if not symbolic_state_reader.is_vaild():\n print(\"no pig or birds found, just shoot\")\n return self.env.action_space.sample()\n\n sling = symbolic_state_reader.find_slingshot()[0]\n sling.width, sling.height = sling.height, sling.width\n\n # get all the pigs\n pigs = symbolic_state_reader.find_pigs()\n\n # if there is a sling, then play, otherwise skip.\n if sling:\n # If there are pigs, we pick up a pig randomly and shoot it.\n if pigs:\n release_point = None\n # random pick up a pig\n pig = pigs[random.randint(0, len(pigs) - 1)]\n temp_pt = pig.get_centre_point()\n\n # TODO change StateReader.cv_utils.Rectangle\n # to be more intuitive\n _tpt = Point2D(temp_pt[1], temp_pt[0])\n\n pts = self.tp.estimate_launch_point(sling, _tpt)\n\n if not pts:\n # Add logic to deal with unreachable target\n release_point = Point2D(-600, 560)\n\n elif len(pts) == 1:\n release_point = pts[0]\n elif len(pts) == 2:\n if random.randint(0, 1) == 0:\n release_point = pts[1]\n else:\n release_point = pts[0]\n\n # Get the release point from the trajectory prediction module\n if release_point:\n self.tp.get_release_angle(sling, release_point)\n\n birds = symbolic_state_reader.find_birds()\n bird_on_sling = symbolic_state_reader.find_bird_on_sling(birds, sling)\n bird_type = bird_on_sling.type\n\n if bird_type == GameObjectType.REDBIRD:\n tap_interval = 0 # start of trajectory\n elif bird_type == GameObjectType.YELLOWBIRD:\n tap_interval = 65 + random.randint(0, 24) # 65-90% of the way\n elif bird_type == GameObjectType.WHITEBIRD:\n tap_interval = 50 + random.randint(0, 19) # 50-70% of the way\n elif bird_type == GameObjectType.BLACKBIRD:\n tap_interval = 0 # do not tap black bird\n elif bird_type == GameObjectType.BLUEBIRD:\n tap_interval = 65 + random.randint(0, 19) # 65-85% of the way\n else:\n tap_interval = 60\n\n tap_time = self.tp.get_tap_time(sling, release_point, _tpt, tap_interval)\n shot = [release_point.X - sling.X, sling.Y - release_point.Y, tap_time]\n return shot\n else:\n return self.env.action_space.sample(1)\n\n print('didn\\'t find slingshot, just shoot')\n return self.env.action_space.sample()\n"
] |
[
[
"numpy.loadtxt"
]
] |
vishalbelsare/cgpm
|
[
"56a481829448bddc9cdfebd42f65023287d5b7c7"
] |
[
"tests/test_mvkde.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2015-2016 MIT Probabilistic Computing Project\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\n\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nfrom scipy.stats import chisquare\nfrom scipy.stats import ks_2samp\n\nfrom cgpm.kde.mvkde import MultivariateKde\nfrom cgpm.uncorrelated.linear import Linear\nfrom cgpm.utils import general as gu\nfrom cgpm.utils import test as tu\n\n\nO = 'outputs'\nST = 'stattypes'\nSA = 'statargs'\nN = 'numerical'\nC = 'nominal'\n\n\ndef test_initialize():\n # This test ensures that MvKde raises on bad initialize arguments.\n # Typical initialization.\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n # No inputs allowed.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=[2],\n distargs={O: {ST:[N, C], SA: [{}, {'k': 2}]}})\n # At least one output.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[], inputs=[],\n distargs={O: {ST: [], SA:[]}})\n # Unique outputs.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 0], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n # Ensure outputs in distargs.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs=None)\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={'output': {ST: [N, C], SA: [{}, {'k': 2}]}})\n # Ensure stattypes and statargs in distargs['outputs]'\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {'stattype': [N, C], SA :[{}, {'k': 2}]}})\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], 'eland': [{}, {'k': 2}]}})\n # Ensure stattypes correct length.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C, N], SA: [{}, {'k': 2}]}})\n # Ensure statargs correct length.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, None, {'k': 2}]}})\n # Ensure number of categories provided as k.\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, {'h': 2}]}})\n with pytest.raises(ValueError):\n MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, {}]}})\n\n\ndef test_invalid_incorporate():\n # This test ensures that MvKde raises on bad incorporate.\n # No evidence.\n kde = MultivariateKde(\n outputs=[0, 1], inputs=None,\n distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n # Missing query.\n with pytest.raises(ValueError):\n kde.incorporate(0, {})\n # Disallow inputs.\n with pytest.raises(ValueError):\n kde.incorporate(0, {0:1}, {1:2})\n # Unknown output var.\n with pytest.raises(ValueError):\n kde.incorporate(0, {0:1, 2:1})\n kde.incorporate(0, {0:1, 1:1})\n # Duplicate rowid.\n with 
pytest.raises(ValueError):\n kde.incorporate(0, {1:1})\n # Unspecified entry 0 should be nan\n kde.incorporate(1, {1:1})\n assert np.isnan(kde.data[1][0])\n\n\ndef uni_normal_1(N, rng):\n return rng.normal(-1, 1, size=N)\ndef uni_normal_2(N, rng):\n return rng.normal(-1, 4, size=N)\ndef uni_normal_3(N, rng):\n return rng.normal(-1, 16, size=N)\ndef uni_normal_4(N, rng):\n return rng.normal(10, 1, size=N)\ndef uni_normal_5(N, rng):\n return rng.normal(10, 4, size=N)\ndef uni_normal_6(N, rng):\n return rng.normal(10, 16, size=N)\ndef uni_normal_8(N, rng):\n return rng.normal(-13, 4, size=N)\ndef uni_normal_9(N, rng):\n return rng.normal(-13, 16, size=N)\ndef bi_normal_1(N, rng):\n counts = rng.multinomial(N, pvals=[.7,.3])\n return np.hstack((\n uni_normal_1(counts[0], rng),\n uni_normal_2(counts[1], rng)))\ndef bi_normal_2(N, rng):\n counts = rng.multinomial(N, pvals=[.6,.4])\n return np.hstack((\n uni_normal_5(counts[0], rng),\n uni_normal_8(counts[1], rng)))\ndef bi_normal_3(N, rng):\n counts = rng.multinomial(N, pvals=[.5,.5])\n return np.hstack((\n uni_normal_2(counts[0], rng),\n uni_normal_8(counts[1], rng)))\ndef bi_normal_4(N, rng):\n counts = rng.multinomial(N, pvals=[.5,.5])\n return np.hstack((\n uni_normal_6(counts[0], rng),\n uni_normal_1(counts[1], rng)))\ndef bi_normal_5(N, rng):\n counts = rng.multinomial(N, pvals=[.65,.45])\n return np.hstack((\n uni_normal_1(counts[0], rng),\n uni_normal_4(counts[1], rng)))\n\nSAMPLES = [\n uni_normal_1,\n uni_normal_2,\n uni_normal_3,\n uni_normal_4,\n uni_normal_5,\n uni_normal_6,\n uni_normal_8,\n uni_normal_9,\n bi_normal_1,\n bi_normal_2,\n bi_normal_3,\n bi_normal_4,\n bi_normal_5,\n]\n\[email protected]('i', xrange(len(SAMPLES)))\ndef test_univariate_two_sample(i):\n # This test ensures posterior sampling of uni/bimodal dists on R. When the\n # plot is shown, a density curve overlays the samples which is useful for\n # seeing that logpdf/simulate agree.\n N_SAMPLES = 100\n\n rng = gu.gen_rng(2)\n # Synthetic samples.\n samples_train = SAMPLES[i](N_SAMPLES, rng)\n samples_test = SAMPLES[i](N_SAMPLES, rng)\n # Univariate KDE.\n kde = MultivariateKde([3], None, distargs={O: {ST: [N], SA:[{}]}}, rng=rng)\n # Incorporate observations.\n for rowid, x in enumerate(samples_train):\n kde.incorporate(rowid, {3: x})\n # Run inference.\n kde.transition()\n # Generate posterior samples.\n samples_gen = [s[3] for s in kde.simulate(-1, [3], N=N_SAMPLES)]\n # Plot comparison of all train, test, and generated samples.\n fig, ax = plt.subplots()\n ax.scatter(samples_train, [0]*len(samples_train), color='b', label='Train')\n ax.scatter(samples_gen, [1]*len(samples_gen), color='r', label='KDE')\n ax.scatter(samples_test, [2]*len(samples_test), color='g', label='Test')\n # Overlay the density function.\n xs = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 200)\n pdfs = [kde.logpdf(-1, {3: x}) for x in xs]\n # Rescale the pdfs into the range 1 to 1.5 for plotting.\n pdfs_plot = np.exp(pdfs)+1\n pdfs_plot = (pdfs_plot/max(pdfs_plot)) * 1.5\n ax.plot(xs, pdfs_plot, color='k')\n # Clear up some labels.\n ax.set_title('Univariate KDE Posterior versus Generator')\n ax.set_xlabel('x')\n ax.set_yticklabels([])\n # Show the plot.\n ax.grid()\n plt.close()\n # KS test\n _, p = ks_2samp(samples_test, samples_gen)\n assert .05 < p\n\n\[email protected]('noise', [.1, .3, .7])\ndef test_bivariate_conditional_two_sample(noise):\n # This test checks joint and conditional simulation of a bivariate normal\n # with (correlation 1-noise). 
The most informative use is plotting but\n # there is a numerical test for the conditional distributions.\n N_SAMPLES = 100\n\n rng = gu.gen_rng(2)\n # Synthetic samples.\n linear = Linear(outputs=[0,1], noise=noise, rng=rng)\n samples_train = np.asarray(\n [[s[0], s[1]] for s in linear.simulate(-1, [0,1], N=N_SAMPLES)])\n # Bivariate KDE.\n kde = MultivariateKde(\n [0,1], None, distargs={O: {ST: [N,N], SA:[{},{}]}}, rng=rng)\n # Incorporate observations.\n for rowid, x in enumerate(samples_train):\n kde.incorporate(rowid, {0: x[0], 1: x[1]})\n # Run inference.\n kde.transition()\n # Generate posterior samples from the joint.\n samples_gen = np.asarray(\n [[s[0],s[1]] for s in kde.simulate(-1, [0,1], N=N_SAMPLES)])\n # Plot comparisons of the joint.\n fig, ax = plt.subplots(nrows=1, ncols=2)\n plot_data = zip(\n ax, ['b', 'r'], ['Train', 'KDE'], [samples_train, samples_gen])\n for (a, c, l, s) in plot_data:\n a.scatter(s[:,0], s[:,1], color=c, label=l)\n a.grid()\n a.legend(framealpha=0)\n # Generate posterior samples from the conditional.\n xs = np.linspace(-3, 3, 100)\n cond_samples_a = np.asarray(\n [[s[1] for s in linear.simulate(-1, [1], {0: x0}, N=N_SAMPLES)]\n for x0 in xs])\n cond_samples_b = np.asarray(\n [[s[1] for s in kde.simulate(-1, [1], {0: x0}, N=N_SAMPLES)]\n for x0 in xs])\n # Plot the mean value on the same plots.\n for (a, s) in zip(ax, [cond_samples_a, cond_samples_b]):\n a.plot(xs, np.mean(s, axis=1), linewidth=3, color='g')\n a.set_xlim([-5,4])\n a.set_ylim([-5,4])\n plt.close('all')\n # Perform a two sample test on the means.\n mean_a = np.mean(cond_samples_a, axis=1)\n mean_b = np.mean(cond_samples_b, axis=1)\n _, p = ks_2samp(mean_a, mean_b)\n assert .01 < p\n\n\ndef test_univariate_categorical():\n # This test generates univariate data from a nominal variable with 6 levels\n # and probability vector p_theory, and performs a chi-square test on\n # posterior samples from MvKde.\n\n rng = gu.gen_rng(2)\n N_SAMPLES = 1000\n p_theory = [.3, .1, .2, .15, .15, .1]\n samples_test = rng.choice(range(6), p=p_theory, size=N_SAMPLES)\n kde = MultivariateKde(\n [7], None, distargs={O: {ST: [C], SA:[{'k': 6}]}}, rng=rng)\n # Incorporate observations.\n for rowid, x in enumerate(samples_test):\n kde.incorporate(rowid, {7: x})\n kde.transition()\n # Posterior samples.\n samples_gen = kde.simulate(-1, [7], N=N_SAMPLES)\n f_obs = np.bincount([s[7] for s in samples_gen])\n f_exp = np.bincount(samples_test)\n _, pval = chisquare(f_obs, f_exp)\n assert 0.05 < pval\n # Get some coverage on logpdf_score.\n assert kde.logpdf_score() < 0\n\n\ndef test_noisy_permutation_categorical():\n # This test builds a synthetic bivariate distribution for variables X and Y,\n # which are both categorical(3). The relationship is y = f(X) where f is\n # the permutation (0,1,2)->(1,2,0). To introduce noise, 10 percent of the\n # samples are \"corrupted\" and do not obey the relationship. 
The test ensures\n # posterior simulate/logpdf target the permutation, and agree with one\n # another.\n\n rng = gu.gen_rng(22)\n N_SAMPLES = 250\n\n f_permutation = {0:1, 1:2, 2:0}\n b_permutation = {0:2, 1:0, 2:1}\n\n X = rng.choice([0,1,2], p=[.33, .33, .34], size=N_SAMPLES).astype(float)\n Y = (X+1) % 3\n\n # Corrupt 10% of the samples.\n corruption = rng.choice(\n range(N_SAMPLES), replace=False, size=int(.1*N_SAMPLES))\n for c in corruption:\n Y[c] = rng.choice([i for i in f_permutation if i!=Y[c]])\n\n # Add 2 nans.\n X[0] = np.nan\n Y[4] = np.nan\n\n samples_test = np.column_stack((X,Y))\n\n # Build MvKde.\n kde = MultivariateKde(\n [7,8], None,\n distargs={O: {ST: [C, C], SA:[{'k': 3}, {'k': 3}]}}, rng=rng)\n for rowid, x in enumerate(samples_test):\n kde.incorporate(rowid, {7:x[0], 8:x[1]})\n kde.transition()\n\n def test_sample_match(s, target):\n f_obs = np.bincount(s)\n f_exp = [90 if i==target else 5 for i in [0,1,2]]\n # Max should be the target.\n amax_obs = np.argmax(f_obs)\n amax_exp = np.argmax(f_exp)\n assert amax_obs == amax_exp\n # Noise should not account for more than .20\n n_noise = sum(f for i,f in enumerate(f_obs) if i!=amax_obs)\n frac_noise = n_noise / float(sum(f_obs))\n assert frac_noise < 0.20\n\n def test_logps_match(s, ps):\n f_obs = np.bincount(s)\n n = float(sum(f_obs))\n p_obs = f_obs / n\n _, pval = chisquare(p_obs*n, ps*n)\n assert 0.05 < pval\n\n # Generate posterior samples conditioning on 7.\n for g in [0,1,2]:\n samples = [s[8] for s in kde.simulate(-1, [8], {7: g}, N=10000)]\n test_sample_match(samples, f_permutation[g])\n logps = [kde.logpdf(-1, {8:i}, {7:g}) for i in f_permutation]\n test_logps_match(samples, np.exp(logps))\n\n # Generate posterior samples conditioning on 8.\n for g in [0,1,2]:\n samples = [s[7] for s in kde.simulate(-1, [7], {8: g}, N=10000)]\n test_sample_match(samples, b_permutation[g])\n logps = [kde.logpdf(-1, {7:i}, {8:g}) for i in f_permutation]\n test_logps_match(samples, np.exp(logps))\n\n\ndef test_transition_no_data():\n kde = MultivariateKde(\n [1], None, distargs={O: {ST: [N], SA: [{}]}}, rng=gu.gen_rng(0))\n bw = list(kde.bw)\n kde.transition()\n assert np.allclose(bw, kde.bw)\n\n\ndef test_serialize():\n rng = gu.gen_rng(1)\n\n data = rng.rand(20, 5)\n data[:10,-1] = 0\n data[10:,-1] = 1\n\n kde = MultivariateKde(\n range(5), None,\n distargs={O: {ST: [N, N, N, N, C], SA: [{},{},{},{},{'k':1}]}}, rng=rng)\n for rowid, x in enumerate(data):\n kde.incorporate(rowid, dict(zip(range(5), x)))\n kde.transition()\n\n metadata_s = json.dumps(kde.to_metadata())\n metadata_l = json.loads(metadata_s)\n\n modname = importlib.import_module(metadata_l['factory'][0])\n builder = getattr(modname, metadata_l['factory'][1])\n kde2 = builder.from_metadata(metadata_l, rng=rng)\n\n # Variable indexes.\n assert kde2.outputs == kde.outputs\n assert kde2.inputs == kde.inputs\n # Distargs.\n assert kde2.get_distargs() == kde.get_distargs()\n # Dataset.\n assert kde2.data == kde.data\n assert kde2.N == kde.N\n # Bandwidth params.\n assert np.allclose(kde2.bw, kde.bw)\n # Statistical types.\n assert kde2.stattypes == kde.stattypes\n\n\n# XXX The following three tests are very similar to test_normal_categorical. The\n# two tests can be merged easily and it should be done to reduce duplication.\n\ndef generate_real_nominal_data(N, rng=None):\n # Generates a bivariate dataset, where the first variable x is real-valued\n # and the second variable z is nominal with 6 levels. The real variable's\n # mean is determined by the value of z, where there are three means\n # corresponding to levels [(0,1), (2,3), (4,5)].\n\n if rng is None: rng = gu.gen_rng(0)\n T, Zv, Zc = tu.gen_data_table(\n N, [1], [[.3, .5, .2]], ['normal'], [None], [.95], rng=rng)\n data = np.zeros((N, 2))\n data[:,0] = T[0]\n indicators = [0, 1, 2, 3, 4, 5]\n counts = {0:0, 1:0, 2:0}\n for i in xrange(N):\n k = Zc[0][i]\n data[i,1] = 2*indicators[k] + counts[k] % 2\n counts[k] += 1\n return data, indicators\n\n\[email protected](scope='module')\ndef kde_xz():\n # Learns an MvKde on the dataset generated by generate_real_nominal_data\n # and returns the fixture for use in the next three tests.\n\n N_SAMPLES = 250\n data, indicators = generate_real_nominal_data(N_SAMPLES)\n K = MultivariateKde(\n [0,1], None,\n distargs={O: {ST: [N, C], SA:[{}, {'k': len(indicators)}]}},\n rng=gu.gen_rng(0))\n for rowid, x in enumerate(data):\n K.incorporate(rowid, {0:x[0], 1:x[1]})\n K.transition()\n return K\n\n\ndef test_joint(kde_xz):\n # Simulate from the joint distribution of x,z (see\n # generate_real_nominal_data) and perform a KS test at each of the\n # subpopulations at the six levels of z.\n\n data = np.asarray(kde_xz.data.values())\n indicators = sorted(set(data[:,1].astype(int)))\n joint_samples = kde_xz.simulate(-1, [0,1], N=len(data))\n _, ax = plt.subplots()\n ax.set_title('Joint Simulation')\n for t in indicators:\n # Plot original data.\n data_subpop = data[data[:,1] == t]\n ax.scatter(data_subpop[:,1], data_subpop[:,0], color=gu.colors[t])\n # Plot simulated data for indicator t.\n samples_subpop = [j[0] for j in joint_samples if j[1] == t]\n ax.scatter(\n np.add([t]*len(samples_subpop), .25), samples_subpop,\n color=gu.colors[t])\n # KS test.\n _, p = ks_2samp(data_subpop[:,0], samples_subpop)\n assert .05 < p\n ax.set_xlabel('z')\n ax.set_ylabel('x')\n ax.grid()\n\n\ndef test_conditional_indicator(kde_xz):\n # Simulate from the conditional distribution of x|z (see\n # generate_real_nominal_data) and perform a KS test at each of the\n # subpopulations at the six levels of z.\n\n data = np.asarray(kde_xz.data.values())\n indicators = sorted(set(data[:,1].astype(int)))\n _, ax = plt.subplots()\n ax.set_title('Conditional Simulation Of X Given Indicator Z')\n for t in indicators:\n # Plot original data.\n data_subpop = data[data[:,1] == t]\n ax.scatter(data_subpop[:,1], data_subpop[:,0], color=gu.colors[t])\n # Plot simulated data.\n samples_subpop = [s[0] for s in\n kde_xz.simulate(-1, [0], {1:t}, None, N=len(data_subpop))]\n ax.scatter(\n np.repeat(t, len(data_subpop)) + .25,\n samples_subpop, color=gu.colors[t])\n # KS test.\n _, p = ks_2samp(data_subpop[:,0], samples_subpop)\n assert .1 < p\n ax.set_xlabel('z')\n ax.set_ylabel('x')\n ax.grid()\n\n\ndef test_conditional_real(kde_xz):\n # Simulate from the conditional distribution of z|x (see\n # generate_real_nominal_data) and plot the frequencies of the simulated\n # values.\n\n data = np.asarray(kde_xz.data.values())\n indicators = sorted(set(data[:,1].astype(int)))\n fig, axes = plt.subplots(2,3)\n fig.suptitle('Conditional Simulation Of Indicator Z Given X', size=20)\n # Compute representative data sample for each indicator.\n means = [np.mean(data[data[:,1]==t], axis=0)[0] for t in indicators]\n for mean, indicator, ax in zip(means, indicators, axes.ravel('F')):\n samples_subpop = [s[1] for s in\n kde_xz.simulate(-1, [1], {0:mean}, None, N=len(data))]\n # Plot a histogram of the simulated indicator.\n ax.hist(samples_subpop, color='g', 
alpha=.4)\n ax.set_title('True Indicator Z %d' % indicator)\n ax.set_xlabel('Simulated Indicator Z')\n ax.set_xticks(indicators)\n ax.set_ylabel('Frequency')\n ax.set_ylim([0, ax.get_ylim()[1]+10])\n ax.grid()\n # Check that the simulated indicator agrees with true indicator.\n true_ind_a = indicator\n true_ind_b = indicator-1 if indicator % 2 else indicator+1\n counts = np.bincount(samples_subpop)\n frac = sum(counts[[true_ind_a, true_ind_b]])/float(sum(counts))\n assert .8 < frac\n"
] |
[
[
"scipy.stats.ks_2samp",
"numpy.allclose",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.mean",
"numpy.bincount",
"matplotlib.pyplot.close",
"scipy.stats.chisquare",
"numpy.exp",
"numpy.zeros",
"numpy.column_stack"
]
] |
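Several tests in the row above reduce to the same two-sample Kolmogorov-Smirnov check from the `apis` list. A minimal sketch of that check on synthetic data (the variable names are illustrative, not from the test file):

```python
import numpy as np
from scipy.stats import ks_2samp

rng = np.random.RandomState(0)
observed = rng.normal(0, 1, size=200)   # stands in for held-out samples
generated = rng.normal(0, 1, size=200)  # stands in for posterior samples
_, p = ks_2samp(observed, generated)
# A large p is expected here; p < .05 would flag different distributions.
print(p)
```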
esalesky/xnmt-devel
|
[
"90598d7cc8d22a7167acf88e29df81e130fc64fc"
] |
[
"xnmt/specialized_encoders/segmenting_encoder/priors.py"
] |
[
"\"\"\"\nPriors classes that wraps us some probability distribution that can be used to\ndraw samples and calculate the probability of an event.\n\nSupports 2 methods:\n - log_ll: Returns the log likelihood of an event (occurence)\n - sample: Draw samples with a size (batch_size x size)\n\"\"\"\n\nimport math\nimport numpy as np\nfrom scipy.stats import poisson\n\nfrom xnmt.persistence import serializable_init, Serializable\nfrom xnmt.events import register_xnmt_handler, handle_xnmt_event\n\nclass Prior(object):\n def log_ll(self, event): raise NotImplementedError()\n def sample(self, size): raise NotImplementedError()\n\nclass PoissonPrior(Prior, Serializable):\n \"\"\" The poisson prior \"\"\"\n yaml_tag = '!PoissonPrior'\n @serializable_init\n def __init__(self, mu=3.3):\n self.mu = mu\n\n def log_ll(self, event):\n return math.log(poisson.pmf(event, self.mu))\n\n def sample(self, batch_size, size):\n return np.random.poisson(lam=self.mu, size=(batch_size, size))\n\nclass UniformPrior(Prior, Serializable):\n \"\"\" The uniform prior \"\"\"\n yaml_tag = '!UniformPrior'\n @serializable_init\n def __init__(self, low=0, high=1):\n self.x_diff = high - low\n\n def log_ll(self, event):\n return -math.log(self.x_diff)\n\n def sample(self, batch_size, size):\n return np.random.uniform(0, self.x_diff, size=(batch_size, size))\n\nclass GoldInputPrior(Prior, Serializable):\n \"\"\"\n This prior is based on input so there is no probability being calculated.\n The draw sample method will simply return the defined gold standard by accessing\n the defined attribute as a gold standard from the input.\n\n sample = getattr(input, \"attr_name\")\n \"\"\"\n\n yaml_tag = '!GoldInputPrior'\n\n @serializable_init\n @register_xnmt_handler\n def __init__(self, attr_name):\n self.attr_name = attr_name\n\n def log_ll(self, event):\n return 0\n\n @handle_xnmt_event\n def on_start_sent(self, src):\n self.src = src\n\n def sample(self, batch_size, size):\n return [getattr(self.src[i], self.attr_name) for i in range(batch_size)]\n\n"
] |
[
[
"numpy.random.poisson",
"numpy.random.uniform",
"scipy.stats.poisson.pmf"
]
] |
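The `PoissonPrior` in the record above pairs `scipy.stats.poisson.pmf` (for `log_ll`) with `numpy.random.poisson` (for `sample`). A minimal sketch of that two-method contract without the xnmt serialization and event machinery (the class name `SimplePoissonPrior` is ours, not the library's):

    import math
    import numpy as np
    from scipy.stats import poisson

    class SimplePoissonPrior:
        """Poisson prior exposing the same log_ll/sample contract as the record."""
        def __init__(self, mu=3.3):
            self.mu = mu

        def log_ll(self, event):
            # log P(event) under Poisson(mu)
            return math.log(poisson.pmf(event, self.mu))

        def sample(self, batch_size, size):
            # (batch_size x size) array of draws
            return np.random.poisson(lam=self.mu, size=(batch_size, size))

    prior = SimplePoissonPrior(mu=3.3)
    print(prior.log_ll(3))     # log-probability of observing 3
    print(prior.sample(2, 4))  # 2 x 4 array of samples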
bsmith39/CS595Project
|
[
"96f4930ea2974f219432ab8b3e7f585b5554b367"
] |
[
"sirepo/template/warpvnd.py"
] |
[
"# -*- coding: utf-8 -*-\nu\"\"\"Warp VND/WARP execution template.\n\n:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcollections\nfrom pykern import pkio\nfrom pykern.pkdebug import pkdc, pkdp, pkdlog\nfrom rswarp.cathode import sources\nfrom rswarp.utilities.file_utils import readparticles\nfrom scipy import constants\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common\nimport h5py\nimport numpy as np\nimport os.path\nimport py.path\nimport re\n\nCOMPARISON_STEP_SIZE = 100\nSIM_TYPE = 'warpvnd'\nWANT_BROWSER_FRAME_CACHE = True\n\n_COMPARISON_FILE = 'diags/fields/electric/data00{}.h5'.format(COMPARISON_STEP_SIZE)\n_CULL_PARTICLE_SLOPE = 1e-4\n_DENSITY_FILE = 'density.npy'\n_EGUN_CURRENT_FILE = 'egun-current.npy'\n_EGUN_STATUS_FILE = 'egun-status.txt'\n_PARTICLE_PERIOD = 100\n_PARTICLE_FILE = 'particles.npy'\n_REPORT_STYLE_FIELDS = ['colorMap', 'notes', 'color']\n_SCHEMA = simulation_db.get_schema(SIM_TYPE)\n\ndef background_percent_complete(report, run_dir, is_running):\n files = _h5_file_list(run_dir, 'currentAnimation')\n if (is_running and len(files) < 2) or (not run_dir.exists()):\n return {\n 'percentComplete': 0,\n 'frameCount': 0,\n }\n if len(files) == 0:\n return {\n 'percentComplete': 100,\n 'frameCount': 0,\n 'error': 'simulation produced no frames',\n 'state': 'error',\n }\n file_index = len(files) - 1\n res = {\n 'lastUpdateTime': int(os.path.getmtime(str(files[file_index]))),\n }\n # look at 2nd to last file if running, last one may be incomplete\n if is_running:\n file_index -= 1\n data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))\n percent_complete = 0\n if data.models.simulation.egun_mode == '1':\n status_file = run_dir.join(_EGUN_STATUS_FILE)\n if status_file.exists():\n with open(str(status_file), 'r') as f:\n m = re.search('([\\d\\.]+)\\s*/\\s*(\\d+)', f.read())\n if m:\n percent_complete = float(m.group(1)) / int(m.group(2))\n egun_current_file = run_dir.join(_EGUN_CURRENT_FILE)\n if egun_current_file.exists():\n v = np.load(str(egun_current_file))\n res['egunCurrentFrameCount'] = len(v)\n else:\n percent_complete = (file_index + 1.0) * _PARTICLE_PERIOD / data.models.simulationGrid.num_steps\n\n if percent_complete < 0:\n percent_complete = 0\n elif percent_complete > 1.0:\n percent_complete = 1.0\n res['percentComplete'] = percent_complete * 100\n res['frameCount'] = file_index + 1\n return res\n\n\ndef fixup_old_data(data):\n for m in [\n 'egunCurrentAnimation',\n 'fieldReport',\n 'impactDensityAnimation',\n 'particle3d',\n 'particleAnimation',\n 'simulation',\n 'simulationGrid',\n ]:\n if m not in data['models']:\n data['models'][m] = {}\n template_common.update_model_defaults(data['models'][m], m, _SCHEMA)\n if 'joinEvery' in data['models']['particle3d']:\n del data['models']['particle3d']['joinEvery']\n for c in data['models']['conductorTypes']:\n if 'isConductor' not in c:\n c['isConductor'] = '1' if c['voltage'] > 0 else '0'\n template_common.update_model_defaults(c, 'box', _SCHEMA)\n for c in data['models']['conductors']:\n template_common.update_model_defaults(c, 'conductorPosition', _SCHEMA)\n if 'fieldComparisonReport' not in data['models']:\n grid = data['models']['simulationGrid']\n data['models']['fieldComparisonReport'] = {\n 'dimension': 'x',\n 'xCell1': int(grid['num_x'] / 3.),\n 'xCell2': int(grid['num_x'] / 2.),\n 
'xCell3': int(grid['num_x'] * 2. / 3),\n 'zCell1': int(grid['num_z'] / 2.),\n 'zCell2': int(grid['num_z'] * 2. / 3),\n 'zCell3': int(grid['num_z'] * 4. / 5),\n }\n\n\ndef generate_field_comparison_report(data, run_dir):\n params = data['models']['fieldComparisonReport']\n dimension = params['dimension']\n with h5py.File(str(py.path.local(run_dir).join(_COMPARISON_FILE))) as f:\n values = f['data/{}/meshes/E/{}'.format(COMPARISON_STEP_SIZE, dimension)]\n values = values[:, 0, :]\n radius = _meters(data['models']['simulationGrid']['channel_width'] / 2.)\n x_range = [-radius, radius]\n z_range = [0, _meters(data['models']['simulationGrid']['plate_spacing'])]\n plots, y_range = _create_plots(dimension, data, values, z_range if dimension == 'x' else x_range)\n plot_range = x_range if dimension == 'x' else z_range\n return {\n 'title': 'Comparison of E {}'.format(dimension),\n 'y_label': 'E {} [V/m]'.format(dimension),\n 'x_label': '{} [m]'.format(dimension),\n 'y_range': y_range,\n 'x_range': [plot_range[0], plot_range[1], len(plots[0]['points'])],\n 'plots': plots,\n }\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_simulation_steps':\n run_dir = simulation_db.simulation_dir(SIM_TYPE, data['simulationId']).join('fieldReport')\n if run_dir.exists():\n res = simulation_db.read_result(run_dir)[0]\n if res and 'tof_expected' in res:\n return {\n 'timeOfFlight': res['tof_expected'],\n 'steps': res['steps_expected'],\n 'electronFraction': res['e_cross'] if 'e_cross' in res else 0,\n }\n return {}\n raise RuntimeError('unknown application data method: {}'.format(data['method']))\n\n\ndef get_data_file(run_dir, model, frame, **kwargs):\n if model == 'particleAnimation' or model == 'egunCurrentAnimation' or model == 'particle3d':\n filename = str(run_dir.join(_PARTICLE_FILE if model == 'particleAnimation' or model == 'particle3d' else _EGUN_CURRENT_FILE))\n with open(filename) as f:\n return os.path.basename(filename), f.read(), 'application/octet-stream'\n #TODO(pjm): consolidate with template/warp.py\n files = _h5_file_list(run_dir, model)\n #TODO(pjm): last client file may have been deleted on a canceled animation,\n # give the last available file instead.\n if len(files) < frame + 1:\n frame = -1\n filename = str(files[int(frame)])\n with open(filename) as f:\n return os.path.basename(filename), f.read(), 'application/octet-stream'\n\n\ndef get_zcurrent_new(particle_array, momenta, mesh, particle_weight, dz):\n \"\"\"\n Find z-directed current on a per cell basis\n particle_array: z positions at a given step\n momenta: particle momenta at a given step in SI units\n mesh: Array of Mesh spacings\n particle_weight: Weight from Warp\n dz: Cell Size\n \"\"\"\n current = np.zeros_like(mesh)\n velocity = constants.c * momenta / np.sqrt(momenta**2 + (constants.electron_mass * constants.c)**2) * particle_weight\n\n for index, zval in enumerate(particle_array):\n bucket = np.round(zval/dz) #value of the bucket/index in the current array\n current[int(bucket)] += velocity[index]\n\n return current * constants.elementary_charge / dz\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'currentAnimation':\n data_file = open_data_file(run_dir, data['modelName'], frame_index)\n return _extract_current(model_data, data_file)\n if data['modelName'] == 'fieldAnimation':\n args = template_common.parse_animation_args(data, {'': ['field', 'startTime']})\n data_file = 
open_data_file(run_dir, data['modelName'], frame_index)\n return _extract_field(args.field, model_data, data_file)\n if data['modelName'] == 'particleAnimation' or data['modelName'] == 'particle3d':\n args = template_common.parse_animation_args(data, {'': ['renderCount', 'startTime']})\n return _extract_particle(run_dir, model_data, int(args.renderCount))\n if data['modelName'] == 'egunCurrentAnimation':\n return _extract_egun_current(model_data, run_dir.join(_EGUN_CURRENT_FILE), frame_index)\n if data['modelName'] == 'impactDensityAnimation':\n return _extract_impact_density(run_dir, model_data)\n raise RuntimeError('{}: unknown simulation frame model'.format(data['modelName']))\n\n\ndef lib_files(data, source_lib):\n \"\"\"No lib files\"\"\"\n return []\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) that affect report\n \"\"\"\n if data['report'] == 'animation':\n return []\n res = ['beam', 'simulationGrid', 'conductors', 'conductorTypes']\n if data['report'] != 'fieldComparisonReport':\n res.append(template_common.report_fields(data, data['report'], _REPORT_STYLE_FIELDS))\n return res\n\n\ndef open_data_file(run_dir, model_name, file_index=None):\n \"\"\"Opens data file_index'th in run_dir\n\n Args:\n run_dir (py.path): has subdir ``hdf5``\n file_index (int): which file to open (default: last one)\n\n Returns:\n OrderedMapping: various parameters\n \"\"\"\n files = _h5_file_list(run_dir, model_name)\n res = pkcollections.OrderedMapping()\n res.num_frames = len(files)\n res.frame_index = res.num_frames - 1 if file_index is None else file_index\n res.filename = str(files[res.frame_index])\n res.iteration = int(re.search(r'data(\\d+)', res.filename).group(1))\n return res\n\n\ndef prepare_output_file(report_info, data):\n if data['report'] == 'fieldComparisonReport':\n run_dir = report_info.run_dir\n fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)\n if fn.exists():\n fn.remove()\n simulation_db.write_result(generate_field_comparison_report(data, run_dir), run_dir=run_dir)\n\n\ndef python_source_for_model(data, model):\n return _generate_parameters_file(data, is_parallel=True)\n\n\ndef remove_last_frame(run_dir):\n for m in ('currentAnimation', 'fieldAnimation'):\n files = _h5_file_list(run_dir, m)\n if len(files) > 0:\n pkio.unchecked_remove(files[-1])\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(\n run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(\n data,\n run_dir,\n is_parallel,\n ),\n )\n\n\ndef _add_particle_paths(electrons, x_points, y_points, z_points, half_height, limit):\n # adds paths for the particleAnimation report\n # culls adjacent path points with similar slope\n count = 0\n cull_count = 0\n for i in range(min(len(electrons[1]), limit)):\n res = {'x': [], 'y': [], 'z': []}\n num_points = len(electrons[1][i])\n prev = [None, None, None]\n for j in range(num_points):\n curr = [\n electrons[1][i][j],\n electrons[0][i][j],\n electrons[2][i][j],\n ]\n if j > 0 and j < num_points - 1:\n next = [\n electrons[1][i][j+1],\n electrons[0][i][j+1],\n electrons[2][i][j+1]\n ]\n if _cull_particle_point(curr, next, prev):\n cull_count += 1\n continue\n res['x'].append(curr[0])\n 
res['y'].append(curr[1])\n res['z'].append(curr[2])\n prev = curr\n count += len(res['x'])\n x_points.append(res['x'])\n y_points.append(res['y'])\n z_points.append(res['z'])\n pkdc('particles: {} paths, {} points {} points culled', len(x_points), count, cull_count)\n\n\ndef _create_plots(dimension, data, values, x_range):\n params = data['models']['fieldComparisonReport']\n y_range = None\n visited = {}\n plots = []\n #TODO(pjm): keep in sync with warpvnd.js cell colors\n color = ['red', 'green', 'blue']\n max_index = values.shape[1] if dimension == 'x' else values.shape[0]\n x_points = np.linspace(x_range[0], x_range[1], values.shape[1] if dimension == 'x' else values.shape[0])\n for i in (1, 2, 3):\n f = '{}Cell{}'.format('z' if dimension == 'x' else 'x', i)\n index = params[f]\n if index >= max_index:\n index = max_index - 1\n if index in visited:\n continue\n visited[index] = True\n if dimension == 'x':\n points = values[:, index].tolist()\n else:\n points = values[index, :].tolist()\n if dimension == 'x':\n pos = u'{:.3f} µm'.format(x_points[index] * 1e6)\n else:\n pos = '{:.0f} nm'.format(x_points[index] * 1e9)\n plots.append({\n 'points': points,\n #TODO(pjm): refactor with template_common.compute_plot_color_and_range()\n 'color': color[i - 1],\n 'label': u'{} Location {}'.format('Z' if dimension == 'x' else 'X', pos),\n })\n if y_range:\n y_range[0] = min(y_range[0], min(points))\n y_range[1] = max(y_range[1], max(points))\n else:\n y_range = [min(points), max(points)]\n return plots, y_range\n\n\ndef _cull_particle_point(curr, next, prev):\n # check all three dimensions xy, xz, yz\n if _particle_line_has_slope(curr, next, prev, 0, 1) \\\n or _particle_line_has_slope(curr, next, prev, 0, 2) \\\n or _particle_line_has_slope(curr, next, prev, 1, 2):\n return False\n return True\n\n\ndef _extract_current(data, data_file):\n grid = data['models']['simulationGrid']\n plate_spacing = _meters(grid['plate_spacing'])\n dz = plate_spacing / grid['num_z']\n zmesh = np.linspace(0, plate_spacing, grid['num_z'] + 1) #holds the z-axis grid points in an array\n report_data = readparticles(data_file.filename)\n data_time = report_data['time']\n with h5py.File(data_file.filename, 'r') as f:\n weights = np.array(f['data/{}/particles/beam/weighting'.format(data_file.iteration)])\n curr = get_zcurrent_new(report_data['beam'][:,4], report_data['beam'][:,5], zmesh, weights, dz)\n return _extract_current_results(data, curr, data_time)\n\n\ndef _extract_current_results(data, curr, data_time):\n grid = data['models']['simulationGrid']\n plate_spacing = _meters(grid['plate_spacing'])\n zmesh = np.linspace(0, plate_spacing, grid['num_z'] + 1) #holds the z-axis grid points in an array\n beam = data['models']['beam']\n if data.models.simulationGrid.simulation_mode == '3d':\n cathode_area = _meters(grid['channel_width']) * _meters(grid['channel_height'])\n else:\n cathode_area = _meters(grid['channel_width'])\n RD_ideal = sources.j_rd(beam['cathode_temperature'], beam['cathode_work_function']) * cathode_area\n JCL_ideal = sources.cl_limit(beam['cathode_work_function'], beam['anode_work_function'], beam['anode_voltage'], plate_spacing) * cathode_area\n\n if beam['currentMode'] == '2' or (beam['currentMode'] == '1' and beam['beam_current'] >= JCL_ideal):\n curr2 = np.full_like(zmesh, JCL_ideal)\n y2_title = 'Child-Langmuir cold limit'\n else:\n curr2 = np.full_like(zmesh, RD_ideal)\n y2_title = 'Richardson-Dushman'\n return {\n 'title': 'Current for Time: {:.4e}s'.format(data_time),\n 'x_range': [0, 
plate_spacing],\n 'y_label': 'Current [A]',\n 'x_label': 'Z [m]',\n 'points': [\n curr.tolist(),\n curr2.tolist(),\n ],\n 'x_points': zmesh.tolist(),\n 'y_range': [min(np.min(curr), np.min(curr2)), max(np.max(curr), np.max(curr2))],\n 'y1_title': 'Current',\n 'y2_title': y2_title,\n }\n\n\ndef _extract_egun_current(data, data_file, frame_index):\n v = np.load(str(data_file))\n if frame_index >= len(v):\n frame_index = -1;\n # the first element in the array is the time, the rest are the current measurements\n return _extract_current_results(data, v[frame_index][1:], v[frame_index][0])\n\n\ndef _extract_field(field, data, data_file):\n grid = data['models']['simulationGrid']\n plate_spacing = _meters(grid['plate_spacing'])\n beam = data['models']['beam']\n radius = _meters(grid['channel_width'] / 2.)\n selector = field\n if not field == 'phi':\n selector = 'E/{}'.format(field)\n with h5py.File(data_file.filename, 'r') as f:\n values = np.array(f['data/{}/meshes/{}'.format(data_file.iteration, selector)])\n data_time = f['data/{}'.format(data_file.iteration)].attrs['time']\n dt = f['data/{}'.format(data_file.iteration)].attrs['dt']\n if field == 'phi':\n values = values[0,:,:]\n title = 'ϕ'\n else:\n values = values[:,0,:]\n title = 'E {}'.format(field)\n return {\n 'x_range': [0, plate_spacing, len(values[0])],\n 'y_range': [- radius, radius, len(values)],\n 'x_label': 'z [m]',\n 'y_label': 'x [m]',\n 'title': '{} for Time: {:.4e}s, Step {}'.format(title, data_time, data_file.iteration),\n 'aspect_ratio': 6.0 / 14,\n 'z_matrix': values.tolist(),\n }\n\n\ndef _extract_impact_density(run_dir, data):\n plot_info = np.load(str(run_dir.join(_DENSITY_FILE))).tolist()\n if 'error' in plot_info:\n return plot_info\n #TODO(pjm): consolidate these parameters into one routine used by all reports\n grid = data['models']['simulationGrid']\n plate_spacing = _meters(grid['plate_spacing'])\n beam = data['models']['beam']\n radius = _meters(grid['channel_width'] / 2.)\n\n dx = plot_info['dx']\n dz = plot_info['dz']\n gated_ids = plot_info['gated_ids']\n lines = []\n\n for i in gated_ids:\n v = gated_ids[i]\n for pos in ('bottom', 'left', 'right', 'top'):\n if pos in v:\n zmin, zmax, xmin, xmax = v[pos]['limits']\n row = {\n 'density': v[pos]['density'].tolist(),\n }\n if pos in ('bottom', 'top'):\n row['align'] = 'horizontal'\n row['points'] = [zmin, zmax, xmin + dx / 2.]\n else:\n row['align'] = 'vertical'\n row['points'] = [xmin, xmax, zmin + dz / 2.]\n lines.append(row)\n\n return {\n 'title': 'Impact Density',\n 'x_range': [0, plate_spacing],\n 'y_range': [-radius, radius],\n 'y_label': 'x [m]',\n 'x_label': 'z [m]',\n 'density_lines': lines,\n 'v_min': plot_info['min'],\n 'v_max': plot_info['max'],\n }\n\n\ndef _extract_particle(run_dir, data, limit):\n v = np.load(str(run_dir.join(_PARTICLE_FILE)))\n kept_electrons = v[0]\n lost_electrons = v[1]\n grid = data['models']['simulationGrid']\n plate_spacing = _meters(grid['plate_spacing'])\n beam = data['models']['beam']\n radius = _meters(grid['channel_width'] / 2.)\n half_height = grid['channel_height'] if 'channel_height' in grid else 5.\n half_height = _meters(half_height / 2.)\n x_points = []\n y_points = []\n z_points = []\n _add_particle_paths(kept_electrons, x_points, y_points, z_points, half_height, limit)\n lost_x = []\n lost_y = []\n lost_z = []\n _add_particle_paths(lost_electrons, lost_x, lost_y, lost_z, half_height, limit)\n return {\n 'title': 'Particle Trace',\n 'x_range': [0, plate_spacing],\n 'y_label': 'x [m]',\n 'x_label': 'z [m]',\n 
'z_label': 'y [m]',\n 'points': y_points,\n 'x_points': x_points,\n 'z_points': z_points,\n 'y_range': [-radius, radius],\n 'z_range': [-half_height, half_height],\n 'lost_x': lost_x,\n 'lost_y': lost_y,\n 'lost_z': lost_z\n }\n\n\ndef _generate_impact_density():\n return '''\nfrom rswarp.diagnostics import ImpactDensity\ntry:\n plot_density = ImpactDensity.PlotDensity(None, None, scraper, top, w3d)\n plot_density.gate_scraped_particles()\n plot_density.map_density()\n for gid in plot_density.gated_ids:\n for side in plot_density.gated_ids[gid]:\n del plot_density.gated_ids[gid][side]['interpolation']\n density_results = {\n 'gated_ids': plot_density.gated_ids,\n 'dx': plot_density.dx,\n 'dz': plot_density.dz,\n 'min': plot_density.cmap_normalization.vmin,\n 'max': plot_density.cmap_normalization.vmax,\n }\nexcept AssertionError as e:\n density_results = {\n 'error': e.message,\n }\n ''' + '''\nnp.save('{}', density_results)\n '''.format(_DENSITY_FILE)\n\n\ndef _generate_lattice(data):\n conductorTypeMap = {}\n for ct in data.models.conductorTypes:\n conductorTypeMap[ct.id] = ct\n\n res = 'conductors = ['\n for c in data.models.conductors:\n ct = conductorTypeMap[c.conductorTypeId]\n permittivity = ''\n if ct.isConductor == '0':\n permittivity = ', permittivity={}'.format(float(ct.permittivity))\n res += \"\\n\" + ' Box({}, {}, {}, voltage={}, xcent={}, ycent=0.0, zcent={}{}),'.format(\n _meters(ct.xLength), _meters(ct.yLength), _meters(ct.zLength), ct.voltage, _meters(c.xCenter), _meters(c.zCenter), permittivity)\n res += '''\n]\nfor c in conductors:\n if c.voltage != 0.0:\n installconductor(c)\n\nscraper = ParticleScraper([source, plate] + conductors, lcollectlpdata=True)\n '''\n return res\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n v = None\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['outputDir'] = '\"{}\"'.format(run_dir) if run_dir else None\n v['particlePeriod'] = _PARTICLE_PERIOD\n v['particleFile'] = _PARTICLE_FILE\n v['impactDensityCalculation'] = _generate_impact_density()\n v['egunCurrentFile'] = _EGUN_CURRENT_FILE\n v['conductorLatticeAndParticleScraper'] = _generate_lattice(data)\n v['maxConductorVoltage'] = _max_conductor_voltage(data)\n v['is3D'] = data.models.simulationGrid.simulation_mode == '3d'\n if not v['is3D']:\n v['simulationGrid_num_y'] = v['simulationGrid_num_x']\n v['simulationGrid_channel_height'] = v['simulationGrid_channel_width']\n template_name = ''\n if 'report' not in data:\n template_name = 'visualization'\n elif data['report'] == 'animation':\n if data['models']['simulation']['egun_mode'] == '1':\n v['egunStatusFile'] = _EGUN_STATUS_FILE\n template_name = 'egun'\n else:\n template_name = 'visualization'\n else:\n template_name = 'source-field'\n return template_common.render_jinja(SIM_TYPE, v, 'base.py') \\\n + template_common.render_jinja(SIM_TYPE, v, '{}.py'.format(template_name))\n\n\ndef _h5_file_list(run_dir, model_name):\n return pkio.walk_tree(\n run_dir.join('diags/xzsolver/hdf5' if model_name == 'currentAnimation' else 'diags/fields/electric'),\n r'\\.h5$',\n )\n\n\ndef _max_conductor_voltage(data):\n type_by_id = {}\n for conductor_type in data.models.conductorTypes:\n type_by_id[conductor_type.id] = conductor_type\n max_voltage = data.models.beam.anode_voltage\n for conductor in data.models.conductors:\n conductor_type = type_by_id[conductor.conductorTypeId]\n if conductor_type.voltage > max_voltage:\n max_voltage = conductor_type.voltage\n return 
max_voltage\n\n\ndef _meters(v):\n # convert microns to meters\n return float(v) * 1e-6\n\n\ndef _particle_line_has_slope(curr, next, prev, i1, i2):\n return abs(\n _slope(curr[i1], curr[i2], next[i1], next[i2]) - _slope(prev[i1], prev[i2], curr[i1], curr[i2])\n ) >= _CULL_PARTICLE_SLOPE\n\n\ndef _slope(x1, y1, x2, y2):\n if x2 - x1 == 0:\n # treat no slope as flat for comparison\n return 0\n return (y2 - y1) / (x2 - x1)\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.min",
"numpy.round",
"numpy.full_like",
"numpy.max",
"numpy.zeros_like"
]
] |
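`get_zcurrent_new` in the warpvnd record computes a per-cell z-directed current: a relativistic velocity is derived from each particle's momentum, then binned into mesh cells by z position. A standalone sketch of that calculation with synthetic particles (the mesh size, momenta, and particle weight below are placeholder values, not from any simulation):

    import numpy as np
    from scipy import constants

    def zcurrent(z_positions, momenta, mesh, particle_weight, dz):
        # Relativistic velocity from momentum: v = c*p / sqrt(p^2 + (m*c)^2)
        current = np.zeros_like(mesh)
        velocity = constants.c * momenta / np.sqrt(
            momenta**2 + (constants.electron_mass * constants.c)**2) * particle_weight
        for z, v in zip(z_positions, velocity):
            # bucket = index of the mesh cell containing this particle
            current[int(np.round(z / dz))] += v
        return current * constants.elementary_charge / dz

    mesh = np.linspace(0, 1e-5, 11)    # 10 cells over 10 microns
    dz = mesh[1] - mesh[0]
    z = np.array([2e-6, 2.1e-6, 7e-6])  # particle z positions [m]
    p = np.full(3, 1e-24)               # particle momenta in SI units
    print(zcurrent(z, p, mesh, particle_weight=1e5, dz=dz))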
Abhiseksah/Tic-tak-toe-3
|
[
"cddeb5405f5629446711eb07c962ce6da9fafdc6"
] |
[
"game.py"
] |
[
"from tkinter import *\nfrom tkinter import messagebox\nimport os\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n#----------------\nimport mysql.connector\n#------------------\nimport matplotlib.pyplot as plt\nimport numpy as np\n#---------------------\n\nmydb=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"abhi\",database=\"tictaktoe\")\nmycursor=mydb.cursor()\n\nglobal temp\ntemp=0\nt=0\nglobal still_running\nstill_running=True\nroot= Tk()\n\nroot.geometry(\"400x160\")\nroot.maxsize(400,160)\nroot.minsize(400,160)\nroot.title(\"Tic-Tak-Toe\")\n\nboard=[\"-\",\"-\",\"-\",\n\t\t\"-\",\"-\",\"-\"\n\t\t,\"-\",\"-\",\"-\"]\n\n# game counter section------AND DISPLAY--------start\nmaster_count=open(\"C:/Python27/count.txt\",'r')\n#master_count.write(\"1\")\ngame_counter=master_count.read()\nmaster_count.close()\ndog=int(game_counter)\n#print(type(dog))\ndog+=1\n#print(dog)\nmaster_count=open(\"C:/Python27/count.txt\",'w')\nccc=str(dog)\nmaster_count.write(ccc)\nmaster_count.close()\n\nvar1=StringVar()\nlabel=Label(root,textvariable=var1,fg=\"red\")\nvar1.set(ccc+\"th time played the game.\")\nlabel.pack(side=TOP)\n\n\n# game counter section --------------END\n\n\n\n\ndef change1():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[0]='O'\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=30,y=20)\n\t\tcheck()\n\n\telse:\n\t\tboard[0]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=30,y=20)\n\t\tcheck()\n\n\ndef change2():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[1]='O'\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=75,y=20)\n\t\tcheck()\n\telse:\n\t\tboard[1]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=75,y=20)\n\t\tcheck()\n\ndef change3():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[2]='O'\t\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=120,y=20)\n\t\tcheck()\n\telse:\n\t\tboard[2]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=120,y=20)\n\t\tcheck()\n\ndef change4():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[3]='O'\t\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=30,y=60)\n\t\tcheck()\n\telse:\n\t\tboard[3]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=30,y=60)\n\t\tcheck()\n\ndef change5():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[4]='O'\t\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=75,y=60)\n\t\tcheck()\n\telse:\n\t\tboard[4]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=75,y=60)\n\t\tcheck()\n\ndef change6():\n\tglobal b6\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\n\t\tboard[5]='O'\t\n\t\tb6=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb6.place(x=120,y=60)\n\t\tcheck()\n\telse:\n\t\tboard[5]='X'\n\t\tb6=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb6.place(x=120,y=60)\n\t\tcheck()\n\ndef change7():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\t\n\t\tboard[6]='O'\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=30,y=100)\n\t\tcheck()\n\telse:\n\t\tboard[6]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=30,y=100)\n\t\tcheck()\n\ndef change8():\n\tglobal temp\n\ttemp+=1\n\tif 
temp%2==0:\t\n\t\tboard[7]='O'\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=75,y=100)\n\t\tcheck()\n\telse:\n\t\tboard[7]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=75,y=100)\n\t\tcheck()\n\ndef change9():\n\tglobal temp\n\ttemp+=1\n\tif temp%2==0:\t\n\t\tboard[8]='O'\n\t\tb=Button(text=\"O\",fg=\"red2\",width=5,height=2)\n\t\tb.place(x=120,y=100)\n\t\tcheck()\n\telse:\n\t\tboard[8]='X'\n\t\tb=Button(text=\"X\",fg=\"green4\",width=5,height=2)\n\t\tb.place(x=120,y=100)\n\t\tcheck()\n\n\n#first row\nb1=Button(text=\"1\",command=change1,width=5,height=2,bg=\"grey\")\nb1.place(x=30,y=20)\n\nb=Button(text=\"2\",command=change2,width=5,height=2,bg=\"grey\")\nb.place(x=75,y=20)\n\nb=Button(text=\"3\",command=change3,width=5,height=2,bg=\"grey\")\nb.place(x=120,y=20)\n\n\n#2nd row\nb=Button(text=\"4\",command=change4,width=5,height=2,bg=\"grey\")\nb.place(x=30,y=60)\n\nb=Button(text=\"5\",command=change5,width=5,height=2,bg=\"grey\")\nb.place(x=75,y=60)\n\nb=Button(text=\"6\",command=change6,width=5,height=2,bg=\"grey\")\nb.place(x=120,y=60)\n\n\n#3rd row\nb=Button(text=\"7\",command=change7,width=5,height=2,bg=\"grey\")\nb.place(x=30,y=100)\n\nb=Button(text=\"8\",command=change8,width=5,height=2,bg=\"grey\")\nb.place(x=75,y=100)\n\nb=Button(text=\"9\",command=change9,width=5,height=2,bg=\"grey\")\nb.place(x=120,y=100)\n\n#data Analysis and graph+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n\ndef showdata():\n\t\tmycursor.execute(\"select count(*) from data\")\n\t\tcounter=mycursor.fetchall()\n\n\t\tmycursor.execute(\"select * from data\")\n\t\tresult=mycursor.fetchall()\n\t\tsss=str(result)\n\t\t#print(sss)\n\t\t#print(type(sss))\n\t\tglobal abhi\n\t\tabhi=Toplevel()\n\t\tabhi.geometry(\"200x500\")\n\t\tres=list(reversed(result))\n\t\tfor i in range(int(counter[0][0])):\n\t\t\tfor j in range(2):\n\t\t\t\tvar=StringVar()\n\t\t\t\tlabel=Label(abhi,textvariable=var,relief=RAISED,padx=15)\n\t\t\t\t#str=list(result[i])\n\t\t\t\t#x=str.split()\n\t\t\t\t#print(result[i][j])\n\t\t\t\tvar.set(res[i][j])\n\t\t\t\tlabel.grid(row=i,column=j)\n\ndef delete():\n\tmycursor.execute(\"delete from data\")\n\tmydb.commit()\n\tsumx=0\n\tsumo=0\n\tsumtie=0\n\tx_counter=0\n\to_counter=0\n\ttie_counter=0\n\t\n\t\ndef statistic():\n\tmycursor.execute(\"select count(*) from data where win='X won'\")\n\tsumx=mycursor.fetchall()\n\tmycursor.execute(\"select count(*) from data where win='O won'\")\n\tsumo=mycursor.fetchall()\n\tmycursor.execute(\"select count(*) from data where win='Tie'\")\n\tsumtie=mycursor.fetchall()\n\tlabels='X won','O won','Tie'\n\tsize=[sumx[0][0],sumo[0][0],sumtie[0][0]]\n\t\n\tm=max(size)\n\tif m==sumx[0][0]:\n\t\texplode=[0.05,0,0]\n\tif m==sumo[0][0]:\n\t\texplode=[0,0.1,0]\n\tif m==sumtie[0][0]:\n\t\texplode=[0,0,0.1]\n\n\ty=['x won','O won','Tie',]\n\tplt.subplot(2,1,1)\n\tplt.bar(y,size,align='center',alpha=0.5)\n\tplt.ylabel('Times won')\n\t#plt.pie(y)\n\tplt.title('Statistic')\n\tplt.subplot(2,1,2)\n\t#plt.title('Pie Graph')\n\tplt.pie(size,labels=labels,shadow=True,autopct='%1.1f%%',explode=explode)\n\n\tplt.show()\n\t#print(x_counter,o_counter,tie_counter)\n\ndef email():\n\tmycursor.execute(\"select * from data\")\n\tcontent=mycursor.fetchall()\n\tattachment=open(\"C:/Python27/test11.txt\",'w')\n\tfor i in content:\n\t\tfor j in i:\n\t\t\tattachment.write(j+\" \")\n\t\tattachment.write(\"\\n\")\n\t##f=open(\"C:/Python27/test11.txt\",'r')\n\t#print(f.read())\n\t##msg = 
MIMEText(f.read())\n\tattachment.close()\n\n\n\t##msg['Subject'] = 'The contents of %s' % textfile\n\t##msg['From'] = me\n\t##msg['To'] = you\n\n\n\t#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\t# Python code to illustrate Sending mail with attachments \n# from your Gmail account \n\n# libraries to be imported \n\timport smtplib \n\tfrom email.mime.multipart import MIMEMultipart \n\tfrom email.mime.text import MIMEText \n\tfrom email.mime.base import MIMEBase \n\tfrom email import encoders \n\n\tfromaddr = \"[email protected]\"\n\ttoaddr = e.get()\n\n\t# instance of MIMEMultipart \n\tmsg = MIMEMultipart() \n\n\t# storing the senders email address \n\tmsg['From'] = fromaddr \n\n\t# storing the receivers email address \n\tmsg['To'] = toaddr \n\n\t# storing the subject \n\tmsg['Subject'] = \"History of game\"\n\n\t# string to store the body of the mail \n\tbody = \"just checking\"\n\n\t# attach the body with the msg instance \n\tmsg.attach(MIMEText(body, 'plain')) \n\n\t# open the file to be sent \n\tfilename = \"test11.txt\"\n\tattachment = open(\"C:/Python27/test11.txt\", \"r\") \n\n\t# instance of MIMEBase and named as p \n\tp = MIMEBase('application', 'octet-stream') \n\n\t# To change the payload into encoded form \n\tp.set_payload((attachment).read()) \n\n\t# encode into base64 \n\tencoders.encode_base64(p) \n\n\tp.add_header('Content-Disposition', \"attachment; filename = %s\\n\" % filename) \n\n\t# attach the instance 'p' to instance 'msg' \n\tmsg.attach(p) \n\n\t# creates SMTP session \n\ts = smtplib.SMTP('smtp.gmail.com', 587) \n\n\t# start TLS for security \n\ts.starttls() \n\n\t# Authentication \n\ts.login(fromaddr, \"TYPE YOUR GMAIL PASSWORD HERE\") \n\n\t# Converts the Multipart msg into a string \n\ttext = msg.as_string() \n\n\t# sending the mail \n\ts.sendmail(fromaddr, toaddr, text) \n\n\t# terminating the session \n\ts.quit() \n\t\n\t#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\n\t# Send the message via our own SMTP server, but don't include the\n\t# envelope header.\n\t#s = smtplib.SMTP('smtp.gamil.com',587)\n\t#s.sendmail(me, [you], msg.as_string())\n\t#s.quit()\n\n\n\t\n\t#sender=\"[email protected]\"\n\t#rec=\"[email protected]\"\n\t#passwd=\"abhisekSAH@11\"\n\t#msg=\"heeeeeeeeeee hooooooooo\"\n\n\t#server = smtplib.SMTP('smtp.gmail.com',587)\n\t#server.starttls()\n\t#server.login(sender,passwd)\n\t#print(\"successful\")\n\t#server.sendmail(sender,rec,msg)\n\t#print(\"done\")\n#END--------END-----------END EMAIL PORTION\nframe=Frame(root)\nframe.place(x=200,y=30)\n\n#bottomframe=Frame(root)\n#bottomframe.pack(side= LEFT)\n\nab=Button(frame,text=\"Show History\",padx=54,bg=\"thistle2\",command=showdata)\nab.pack(side=TOP)\n\nab=Button(frame,text=\"Clear History\",padx=55,bg=\"thistle2\",command=delete)\nab.pack(side=TOP)\n\nab=Button(frame,text=\"Send data to me\",padx=50,bg=\"thistle2\",command=email)\nab.pack(side=BOTTOM)\ne=Entry(frame,width=30,fg=\"grey1\",bg=\"OliveDrab1\")\ne.pack(side=BOTTOM)\nab=Button(frame,text=\"Statistic graph\",padx=51,bg=\"thistle2\",command=statistic)\nab.pack(side=BOTTOM)\n#End of graph++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n#check winner\ndef check_winner():\n\tcheck_row()\n\tcheck_col()\n\tcheck_dia()\n\tcheck_tie()\n\n#-------------------------row---------\n\ndef 
check_row():\n\tp=board[0]==board[1]==board[2]!='-'\n\tq=board[3]==board[4]==board[5]!='-'\n\tr=board[6]==board[7]==board[8]!='-'\n\t\n\tglobal still_running\n\tif (p or q or r ):\n\t\tstill_running=False\n\t\t\n\n\n#------------------------col---------\n\ndef check_col():\n\tp=board[0]==board[3]==board[6]!='-'\n\tq=board[1]==board[4]==board[7]!='-'\n\tr=board[2]==board[5]==board[8]!='-'\n\tglobal still_running\n\tif p or q or r :\n\t\tstill_running=False\n#------------------------dia------------\n\ndef check_dia():\n\tp=board[0]==board[4]==board[8]!='-'\n\tq=board[2]==board[4]==board[6]!='-'\n\tglobal still_running\n\tif p or q :\n\t\tstill_running=False\n#==================print tie\n\ndef check_tie():\n\tglobal still_running\n\tglobal temp\n\tif '-' not in board:\n\t\tif still_running :\n\t\t\t#global still_running\n\t\t\tstill_running=False\n\t\t\tglobal t\n\t\t\tt=10\n\t\t\t#print(\"The game is Tie\")\n\n\ndef print_winner():\n\tglobal answer\n\tglobal t\n\tif t==10:\n\t\t#print(\"The game is tie\\n\\n\")\n\t\t#insert data into my sql____________________________________________________________\n\t\tmycursor.execute(\"insert into data (win,date) values ('Tie',current_timestamp)\")\n\t\tmydb.commit()\n\t\tmycursor.close()\n\t\n\t\tanswer=messagebox.askquestion(\"Tic-Tak-Toe\",\"The game is tie\\nWant to play again??\")\n\t\tans()\n\telse:\n\t\tif temp%2==0:\n\t\t\t#print(\"O won\\n\\n\")\n\t\t\tmycursor.execute(\"insert into data (win,date) values ('O won',current_timestamp)\")\n\t\t\tmydb.commit()\n\t\t\tmycursor.close()\n\t\t\t\n\t\t\tanswer=messagebox.askquestion(\"Tic-Tak-Toe\",\"O Won the game\\nWant to play again??\")\n\t\t\tans()\n\t\telse:\n\t\t\t#print(\"X won\\n\\n\")\n\t\t\tmycursor.execute(\"insert into data (win,date) values ('X won',current_timestamp)\")\n\t\t\tmydb.commit()\n\t\t\tmycursor.close()\n\t\t\t\n\t\t\tanswer=messagebox.askquestion(\"Tic-Tak-Toe\",\"X Won the game\\nWant to Play again??\")\n\t\t\tans()\n\n\n#just checking\ndef display():\n\tprint(board[0]+\"|\"+board[1]+\"|\"+board[2])\n\tprint(board[3]+\"|\"+board[4]+\"|\"+board[5])\n\tprint(board[6]+\"|\"+board[7]+\"|\"+board[8])\n\tprint(\"\\n\")\n\n\n\ndef check():\n\t#display()\n\tcheck_winner()\n\tif still_running==False:\n\t\tprint_winner()\n\t\t#root.destroy()\n\n\ndef ans():\n\tif answer=='yes':\n\t\troot.destroy()\n\t\tos.startfile(\"C:/Users/HOME/tictaktoe_3rdUpdate.py\")\n\t\n\telse:\n\t\troot.destroy()\n\n\n\nroot.mainloop()\n\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
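The `statistic` function in the game record draws a two-panel matplotlib figure, a bar chart of win counts above a pie chart with the largest slice exploded, from counts queried out of MySQL. The same plotting layout with hard-coded counts so it runs without a database (the numbers are placeholders):

    import matplotlib.pyplot as plt

    labels = ['X won', 'O won', 'Tie']
    sizes = [12, 9, 4]  # placeholder win counts
    # Explode the largest slice, as the record does via its max() comparison.
    explode = [0.05 if s == max(sizes) else 0 for s in sizes]

    plt.subplot(2, 1, 1)
    plt.bar(labels, sizes, align='center', alpha=0.5)
    plt.ylabel('Times won')
    plt.title('Statistic')

    plt.subplot(2, 1, 2)
    plt.pie(sizes, labels=labels, shadow=True, autopct='%1.1f%%', explode=explode)
    plt.show()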
modestyachts/CIFAR-10.1
|
[
"d9982abb0bfc4846b8d13a11e66b887d946205d0"
] |
[
"code/cifar10.py"
] |
[
"# Derived from https://github.com/jaberg/skdata/blob/master/skdata/cifar10/dataset.py\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pickle\n\nimport numpy as np\n\nclass CIFAR10Data(object):\n def __init__(self, path):\n train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]\n eval_filename = 'test_batch'\n metadata_filename = 'batches.meta'\n\n train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')\n train_labels = np.zeros(50000, dtype='int32')\n for ii, fname in enumerate(train_filenames):\n cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))\n train_images[ii * 10000 : (ii+1) * 10000, ...] = cur_images\n train_labels[ii * 10000 : (ii+1) * 10000, ...] = cur_labels\n eval_images, eval_labels = self._load_datafile(\n os.path.join(path, eval_filename))\n\n with open(os.path.join(path, metadata_filename), 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n self.label_names = data_dict[b'label_names']\n for ii in range(len(self.label_names)):\n self.label_names[ii] = self.label_names[ii].decode('utf-8')\n\n self.train_images = train_images\n self.train_labels = train_labels\n self.eval_images = eval_images\n self.eval_labels = eval_labels\n self.all_images = np.vstack([self.train_images, self.eval_images])\n self.all_labels = np.concatenate([self.train_labels, self.eval_labels])\n assert self.all_images.shape == (60000, 32, 32, 3)\n assert self.all_labels.shape == (60000,)\n \n def compute_l2_distances(self, x):\n return np.sqrt(np.sum(np.square(self.all_images.astype(np.float64) - x.astype(np.float64)), axis=(1,2,3)))\n \n @staticmethod\n def _load_datafile(filename):\n with open(filename, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n assert data_dict[b'data'].dtype == np.uint8\n image_data = data_dict[b'data']\n image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)\n return image_data, np.array(data_dict[b'labels'])\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
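`_load_datafile` above converts each pickled CIFAR-10 batch from its on-disk `(10000, 3072)` channel-major layout into `(10000, 32, 32, 3)` channels-last images. That reshape/transpose step in isolation, with random bytes standing in for a real batch file:

    import numpy as np

    # Fake batch: 10000 rows of 3*32*32 uint8 pixel values, channel-major on disk.
    raw = np.random.randint(0, 256, size=(10000, 3 * 32 * 32), dtype=np.uint8)

    # (N, 3072) -> (N, 3, 32, 32) -> (N, 32, 32, 3): channels-last HWC images.
    images = raw.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
    assert images.shape == (10000, 32, 32, 3)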
cancan101/Lasagne
|
[
"9f591a5f3a192028df9947ba1e4903b3b46e8fe0"
] |
[
"lasagne/updates.py"
] |
[
"\"\"\"\nFunctions to generate Theano update dictionaries for training.\n\nThe update functions implement different methods to control the learning\nrate for use with stochastic gradient descent.\n\nUpdate functions take a loss expression or a list of gradient expressions and\na list of parameters as input and return an ordered dictionary of updates:\n\n.. autosummary::\n :nosignatures:\n\n sgd\n momentum\n nesterov_momentum\n adagrad\n rmsprop\n adadelta\n adam\n\nTwo functions can be used to further modify the updates to include momentum:\n\n.. autosummary::\n :nosignatures:\n\n apply_momentum\n apply_nesterov_momentum\n\nFinally, we provide two helper functions to constrain the norm of tensors:\n\n.. autosummary::\n :nosignatures:\n\n norm_constraint\n total_norm_constraint\n\n:func:`norm_constraint()` can be used to constrain the norm of parameters\n(as an alternative to weight decay), or for a form of gradient clipping.\n:func:`total_norm_constraint()` constrain the total norm of a list of tensors.\nThis is often used when training recurrent neural networks.\n\nExamples\n--------\n>>> import lasagne\n>>> import theano.tensor as T\n>>> import theano\n>>> from lasagne.nonlinearities import softmax\n>>> from lasagne.layers import InputLayer, DenseLayer, get_output\n>>> from lasagne.updates import sgd, apply_momentum\n>>> l_in = InputLayer((100, 20))\n>>> l1 = DenseLayer(l_in, num_units=3, nonlinearity=softmax)\n>>> x = T.matrix('x') # shp: num_batch x num_features\n>>> y = T.ivector('y') # shp: num_batch\n>>> l_out = get_output(l1, x)\n>>> params = lasagne.layers.get_all_params(l1)\n>>> loss = T.mean(T.nnet.categorical_crossentropy(l_out, y))\n>>> updates_sgd = sgd(loss, params, learning_rate=0.0001)\n>>> updates = apply_momentum(updates_sgd, params, momentum=0.9)\n>>> train_function = theano.function([x, y], updates=updates)\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\nfrom . 
import utils\n\n__all__ = [\n \"sgd\",\n \"apply_momentum\",\n \"momentum\",\n \"apply_nesterov_momentum\",\n \"nesterov_momentum\",\n \"adagrad\",\n \"rmsprop\",\n \"adadelta\",\n \"adam\",\n \"norm_constraint\",\n \"total_norm_constraint\"\n]\n\n\ndef get_or_compute_grads(loss_or_grads, params):\n \"\"\"Helper function returning a list of gradients\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to return the gradients for\n\n Returns\n -------\n list of expressions\n If `loss_or_grads` is a list, it is assumed to be a list of\n gradients and returned as is, unless it does not match the length\n of `params`, in which case a `ValueError` is raised.\n Otherwise, `loss_or_grads` is assumed to be a cost expression and\n the function returns `theano.grad(loss_or_grads, params)`.\n \"\"\"\n if isinstance(loss_or_grads, list):\n if not len(loss_or_grads) == len(params):\n raise ValueError(\"Got %d gradient expressions for %d parameters\" %\n (len(loss_or_grads), len(params)))\n return loss_or_grads\n else:\n return theano.grad(loss_or_grads, params)\n\n\ndef sgd(loss_or_grads, params, learning_rate):\n \"\"\"Stochastic Gradient Descent (SGD) updates\n\n Generates update expressions of the form:\n\n * ``param := param - learning_rate * gradient``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n \"\"\"\n grads = get_or_compute_grads(loss_or_grads, params)\n updates = OrderedDict()\n\n for param, grad in zip(params, grads):\n updates[param] = param - learning_rate * grad\n\n return updates\n\n\ndef apply_momentum(updates, params=None, momentum=0.9):\n \"\"\"Returns a modified update dictionary including momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity + updates[param] - param``\n * ``param := param + velocity``\n\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n params : iterable of shared variables, optional\n The variables to apply momentum to. If omitted, will apply\n momentum to all `updates.keys()`.\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n\n Notes\n -----\n Higher momentum also results in larger update steps. 
To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n See Also\n --------\n momentum : Shortcut applying momentum to SGD updates\n \"\"\"\n if params is None:\n params = updates.keys()\n updates = OrderedDict(updates)\n\n for param in params:\n value = param.get_value(borrow=True)\n velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n x = momentum * velocity + updates[param]\n updates[velocity] = x - param\n updates[param] = x\n\n return updates\n\n\ndef momentum(loss_or_grads, params, learning_rate, momentum=0.9):\n \"\"\"Stochastic Gradient Descent (SGD) updates with momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity - learning_rate * gradient``\n * ``param := param + velocity``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n See Also\n --------\n apply_momentum : Generic function applying momentum to updates\n nesterov_momentum : Nesterov's variant of SGD with momentum\n \"\"\"\n updates = sgd(loss_or_grads, params, learning_rate)\n return apply_momentum(updates, momentum=momentum)\n\n\ndef apply_nesterov_momentum(updates, params=None, momentum=0.9):\n \"\"\"Returns a modified update dictionary including Nesterov momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity + updates[param] - param``\n * ``param := param + momentum * velocity + updates[param] - param``\n\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n params : iterable of shared variables, optional\n The variables to apply momentum to. If omitted, will apply\n momentum to all `updates.keys()`.\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n The classic formulation of Nesterov momentum (or Nesterov accelerated\n gradient) requires the gradient to be evaluated at the predicted next\n position in parameter space. 
Here, we use the formulation described at\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,\n which allows the gradient to be evaluated at the current parameters.\n\n See Also\n --------\n nesterov_momentum : Shortcut applying Nesterov momentum to SGD updates\n \"\"\"\n if params is None:\n params = updates.keys()\n updates = OrderedDict(updates)\n\n for param in params:\n value = param.get_value(borrow=True)\n velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n x = momentum * velocity + updates[param] - param\n updates[velocity] = x\n updates[param] = momentum * x + updates[param]\n\n return updates\n\n\ndef nesterov_momentum(loss_or_grads, params, learning_rate, momentum=0.9):\n \"\"\"Stochastic Gradient Descent (SGD) updates with Nesterov momentum\n\n Generates update expressions of the form:\n\n * ``velocity := momentum * velocity + updates[param] - param``\n * ``param := param + momentum * velocity + updates[param] - param``\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1 - momentum`.\n\n The classic formulation of Nesterov momentum (or Nesterov accelerated\n gradient) requires the gradient to be evaluated at the predicted next\n position in parameter space. Here, we use the formulation described at\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,\n which allows the gradient to be evaluated at the current parameters.\n\n See Also\n --------\n apply_nesterov_momentum : Function applying momentum to updates\n \"\"\"\n updates = sgd(loss_or_grads, params, learning_rate)\n return apply_nesterov_momentum(updates, momentum=momentum)\n\n\ndef adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-6):\n \"\"\"Adagrad updates\n\n Scale learning rates by dividing with the square root of accumulated\n squared gradients. See [1]_ for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n Using step size eta Adagrad calculates the learning rate for feature i at\n time step t as:\n\n .. math:: \\\\eta_{t,i} = \\\\frac{\\\\eta}\n {\\\\sqrt{\\\\sum^t_{t^\\\\prime} g^2_{t^\\\\prime,i}+\\\\epsilon}} g_{t,i}\n\n as such the learning rate is monotonically decreasing.\n\n Epsilon is not included in the typical formula, see [2]_.\n\n References\n ----------\n .. [1] Duchi, J., Hazan, E., & Singer, Y. 
(2011):\n Adaptive subgradient methods for online learning and stochastic\n optimization. JMLR, 12:2121-2159.\n\n .. [2] Chris Dyer:\n Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf\n \"\"\"\n\n grads = get_or_compute_grads(loss_or_grads, params)\n updates = OrderedDict()\n\n for param, grad in zip(params, grads):\n value = param.get_value(borrow=True)\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n accu_new = accu + grad ** 2\n updates[accu] = accu_new\n updates[param] = param - (learning_rate * grad /\n T.sqrt(accu_new + epsilon))\n\n return updates\n\n\ndef rmsprop(loss_or_grads, params, learning_rate=1.0, rho=0.9, epsilon=1e-6):\n \"\"\"RMSProp updates\n\n Scale learning rates by dividing with the moving average of the root mean\n squared (RMS) gradients. See [1]_ for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n `rho` should be between 0 and 1. A value of `rho` close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n\n Using the step size :math:`\\\\eta` and a decay factor :math:`\\\\rho` the\n learning rate :math:`\\\\eta_t` is calculated as:\n\n .. math::\n r_t &= \\\\rho r_{t-1} + (1-\\\\rho)*g^2\\\\\\\\\n \\\\eta_t &= \\\\frac{\\\\eta}{\\\\sqrt{r_t + \\\\epsilon}}\n\n References\n ----------\n .. [1] Tieleman, T. and Hinton, G. (2012):\n Neural Networks for Machine Learning, Lecture 6.5 - rmsprop.\n Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)\n \"\"\"\n grads = get_or_compute_grads(loss_or_grads, params)\n updates = OrderedDict()\n\n for param, grad in zip(params, grads):\n value = param.get_value(borrow=True)\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n accu_new = rho * accu + (1 - rho) * grad ** 2\n updates[accu] = accu_new\n updates[param] = param - (learning_rate * grad /\n T.sqrt(accu_new + epsilon))\n\n return updates\n\n\ndef adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):\n \"\"\" Adadelta updates\n\n Scale learning rates by a the ratio of accumulated gradients to accumulated\n step sizes, see [1]_ and notes for further description.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Squared gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n rho should be between 0 and 1. 
A value of rho close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n\n rho = 0.95 and epsilon=1e-6 are suggested in the paper and reported to\n work for multiple datasets (MNIST, speech).\n\n In the paper, no learning rate is considered (so learning_rate=1.0).\n Probably best to keep it at this value.\n epsilon is important for the very first update (so the numerator does\n not become 0).\n\n Using the step size eta and a decay factor rho the learning rate is\n calculated as:\n\n .. math::\n r_t &= \\\\rho r_{t-1} + (1-\\\\rho)*g^2\\\\\\\\\n \\\\eta_t &= \\\\eta \\\\frac{\\\\sqrt{s_{t-1} + \\\\epsilon}}\n {\\sqrt{r_t + \\epsilon}}\\\\\\\\\n s_t &= \\\\rho s_{t-1} + (1-\\\\rho)*g^2\n\n References\n ----------\n .. [1] Zeiler, M. D. (2012):\n ADADELTA: An Adaptive Learning Rate Method.\n arXiv Preprint arXiv:1212.5701.\n \"\"\"\n grads = get_or_compute_grads(loss_or_grads, params)\n updates = OrderedDict()\n\n for param, grad in zip(params, grads):\n value = param.get_value(borrow=True)\n # accu: accumulate gradient magnitudes\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n # delta_accu: accumulate update magnitudes (recursively!)\n delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=param.broadcastable)\n\n # update accu (as in rmsprop)\n accu_new = rho * accu + (1 - rho) * grad ** 2\n updates[accu] = accu_new\n\n # compute parameter update, using the 'old' delta_accu\n update = (grad * T.sqrt(delta_accu + epsilon) /\n T.sqrt(accu_new + epsilon))\n updates[param] = param - learning_rate * update\n\n # update delta_accu (as accu, but accumulating updates)\n delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2\n updates[delta_accu] = delta_accu_new\n\n return updates\n\n\ndef adam(loss_or_grads, params, learning_rate=0.001, beta1=0.9,\n beta2=0.999, epsilon=1e-8):\n \"\"\"Adam updates\n\n Adam updates implemented as in [1]_.\n\n Parameters\n ----------\n loss_or_grads : symbolic expression or list of expressions\n A scalar loss expression, or a list of gradient expressions\n params : list of shared variables\n The variables to generate update expressions for\n learning_rate : float\n Learning rate\n beta_1 : float\n Exponential decay rate for the first moment estimates.\n beta_2 : float\n Exponential decay rate for the second moment estimates.\n epsilon : float\n Constant for numerical stability.\n\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n\n Notes\n -----\n The paper [1]_ includes an additional hyperparameter lambda. This is only\n needed to prove convergence of the algorithm and has no practical use\n (personal communication with the authors), it is therefore omitted here.\n\n References\n ----------\n .. 
[1] Kingma, Diederik, and Jimmy Ba (2014):\n Adam: A Method for Stochastic Optimization.\n arXiv preprint arXiv:1412.6980.\n \"\"\"\n all_grads = get_or_compute_grads(loss_or_grads, params)\n t_prev = theano.shared(utils.floatX(0.))\n updates = OrderedDict()\n\n for param, g_t in zip(params, all_grads):\n m_prev = theano.shared(param.get_value() * 0.)\n v_prev = theano.shared(param.get_value() * 0.)\n t = t_prev + 1\n m_t = beta1*m_prev + (1-beta1)*g_t\n v_t = beta2*v_prev + (1-beta2)*g_t**2\n a_t = learning_rate*T.sqrt(1-beta2**t)/(1-beta1**t)\n step = a_t*m_t/(T.sqrt(v_t) + epsilon)\n\n updates[m_prev] = m_t\n updates[v_prev] = v_t\n updates[param] = param - step\n\n updates[t_prev] = t\n return updates\n\n\ndef norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-7):\n \"\"\"Max weight norm constraints and gradient clipping\n\n This takes a TensorVariable and rescales it so that incoming weight\n norms are below a specified constraint value. Vectors violating the\n constraint are rescaled so that they are within the allowed range.\n\n Parameters\n ----------\n tensor_var : TensorVariable\n Theano expression for update, gradient, or other quantity.\n max_norm : scalar\n This value sets the maximum allowed value of any norm in\n `tensor_var`.\n norm_axes : sequence (list or tuple)\n The axes over which to compute the norm. This overrides the\n default norm axes defined for the number of dimensions\n in `tensor_var`. When this is not specified and `tensor_var` is a\n matrix (2D), this is set to `(0,)`. If `tensor_var` is a 3D, 4D or\n 5D tensor, it is set to a tuple listing all axes but axis 0. The\n former default is useful for working with dense layers, the latter\n is useful for 1D, 2D and 3D convolutional layers.\n (Optional)\n epsilon : scalar, optional\n Value used to prevent numerical instability when dividing by\n very small or zero norms.\n\n Returns\n -------\n TensorVariable\n Input `tensor_var` with rescaling applied to weight vectors\n that violate the specified constraints.\n\n Examples\n --------\n >>> param = theano.shared(\n ... np.random.randn(100, 200).astype(theano.config.floatX))\n >>> update = param + 100\n >>> update = norm_constraint(update, 10)\n >>> func = theano.function([], [], updates=[(param, update)])\n >>> # Apply constrained update\n >>> _ = func()\n >>> from lasagne.utils import compute_norms\n >>> norms = compute_norms(param.get_value())\n >>> np.isclose(np.max(norms), 10)\n True\n\n Notes\n -----\n When `norm_axes` is not specified, the axes over which the norm is\n computed depend on the dimensionality of the input variable. If it is\n 2D, it is assumed to come from a dense layer, and the norm is computed\n over axis 0. If it is 3D, 4D or 5D, it is assumed to come from a\n convolutional layer and the norm is computed over all trailing axes\n beyond axis 0. 
For other uses, you should explicitly specify the axes\n over which to compute the norm using `norm_axes`.\n \"\"\"\n ndim = tensor_var.ndim\n\n if norm_axes is not None:\n sum_over = tuple(norm_axes)\n elif ndim == 2: # DenseLayer\n sum_over = (0,)\n elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer\n sum_over = tuple(range(1, ndim))\n else:\n raise ValueError(\n \"Unsupported tensor dimensionality {}. \"\n \"Must specify `norm_axes`\".format(ndim)\n )\n\n dtype = np.dtype(theano.config.floatX).type\n norms = T.sqrt(T.sum(T.sqr(tensor_var), axis=sum_over, keepdims=True))\n target_norms = T.clip(norms, 0, dtype(max_norm))\n constrained_output = \\\\\n (tensor_var * (target_norms / (dtype(epsilon) + norms)))\n\n return constrained_output\n\n\ndef total_norm_constraint(tensor_vars, max_norm, epsilon=1e-7,\n return_norm=False):\n \"\"\"Rescales a list of tensors based on their combined norm\n\n If the combined norm of the input tensors exceeds the threshold then all\n tensors are rescaled such that the combined norm is equal to the threshold.\n\n Scaling the norms of the gradients is often used when training recurrent\n neural networks [1]_.\n\n Parameters\n ----------\n tensor_vars : List of TensorVariables.\n Tensors to be rescaled.\n max_norm : float\n Threshold value for total norm.\n epsilon : scalar, optional\n Value used to prevent numerical instability when dividing by\n very small or zero norms.\n return_norm : bool\n If True, the total norm is also returned.\n\n Returns\n -------\n tensor_vars_scaled : list of TensorVariables\n The scaled tensor variables.\n norm : Theano scalar\n The combined norms of the input variables prior to rescaling,\n only returned if ``return_norm=True``.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, DenseLayer\n >>> import lasagne\n >>> from lasagne.updates import sgd, total_norm_constraint\n >>> x = T.matrix()\n >>> y = T.ivector()\n >>> l_in = InputLayer((5, 10))\n >>> l1 = DenseLayer(l_in, num_units=7, nonlinearity=T.nnet.softmax)\n >>> output = lasagne.layers.get_output(l1, x)\n >>> cost = T.mean(T.nnet.categorical_crossentropy(output, y))\n >>> all_params = lasagne.layers.get_all_params(l1)\n >>> all_grads = T.grad(cost, all_params)\n >>> scaled_grads = total_norm_constraint(all_grads, 5)\n >>> updates = sgd(scaled_grads, all_params, learning_rate=0.1)\n\n Notes\n -----\n The total norm can be used to monitor training.\n\n References\n ----------\n .. [1] Sutskever, I., Vinyals, O., & Le, Q. V. (2014): Sequence to sequence\n learning with neural networks. In Advances in Neural Information\n Processing Systems (pp. 3104-3112).\n \"\"\"\n norm = T.sqrt(sum(T.sum(tensor**2) for tensor in tensor_vars))\n dtype = np.dtype(theano.config.floatX).type\n target_norm = T.clip(norm, 0, dtype(max_norm))\n multiplier = target_norm / (dtype(epsilon) + norm)\n tensor_vars_scaled = [step*multiplier for step in tensor_vars]\n\n if return_norm:\n return tensor_vars_scaled, norm\n else:\n return tensor_vars_scaled\n"
] |
[
[
"numpy.zeros",
"numpy.dtype"
]
] |
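A quick hedged sketch of the Adam step documented in the lasagne record above: plain NumPy stands in for Theano shared variables, but the bias-corrected step size a_t uses the exact expression from the record. The learning rate and toy objective are illustrative.

import numpy as np

def adam_step(param, grad, m, v, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    # biased first/second moment estimates, as in the record's adam()
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    # bias-corrected step size: a_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    a_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    return param - a_t * m / (np.sqrt(v) + eps), m, v

# toy usage: minimize f(w) = (w - 3)^2
w, m, v = np.zeros(1), np.zeros(1), np.zeros(1)
for t in range(1, 2001):
    w, m, v = adam_step(w, 2.0 * (w - 3.0), m, v, t)
print(w[0])  # approaches 3.0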
JStuckner/DnD-GUI-2
|
[
"bf393c94e48df2e17113046b7f917ac2176ac8e8"
] |
[
"dnd_gui/util/encounter.py"
] |
[
"from random import randint, shuffle\nimport numpy as np\n\ndef order(NPCI):\n #print(NPCI)\n #PC initiative\n numPCs = 4\n SethI = 10\n AnastasiaI = 2\n CameronI = 5\n TravisI = 2\n\n try:\n numNPC = len(NPCI)\n except TypeError:\n numNPC = 1\n\n #Set up matrix\n order = np.zeros(numPCs+numNPC,\n dtype=[('name',(str,12)) ,('roll', (int))])\n order['name'][0] = \"Travis\"\n order['name'][1] = \"Cameron\"\n order['name'][2] = \"Seth\"\n order['name'][3] = \"Anastasia\"\n \n #NPC colors.\n NPCs = ('Blue',\n 'Red',\n 'Purple',\n 'Green',\n 'Gray',\n 'Yellow',\n 'Light Green',\n 'Pink',\n )\n\n for i in range(numNPC):\n if i < len(NPCs):\n try:\n order['name'][4+i] = NPCs[i]\n except TypeError:\n order['name'][4+i] = NPCs\n else:\n order['name'][4+i] = 'NPC_' + str(i+1)\n\n\n\n #Roll for PCs\n order['roll'][2] = SethI + randint(1,20)\n order['roll'][3] = AnastasiaI + randint(1,20)\n order['roll'][1] = CameronI + randint(1,20)\n order['roll'][0] = TravisI + max(randint(1,20), randint(1,20))\n\n #Roll for NPCs\n for i in range(numNPC):\n try:\n order['roll'][i+4] = NPCI[i] + randint(1,20) \n except TypeError:\n order['roll'][i+4] = NPCI + randint(1,20)\n \n #Sort the array\n np.random.shuffle(order)\n order.sort(order='roll')\n\n #Print the order\n l = len(order)\n output = ''\n for i in range(l):\n offset = 12 - len(order['name'][l-i-1])\n #print(order['name'][l-i-1],' ' * offset, '=',order['roll'][l-i-1])\n output = output+str(order['name'][l-i-1])+' '*offset+'='+str(order['roll'][l-i-1])+'\\n'\n\n\n return output\n \n\ndef zombie(num=1):\n for i in range(num):\n print(\"zombie spawns at (%d,%d).\" %(randint(1,35),randint(1,25)))\n\n#order((-2,-2,-2,-2))\n\n"
] |
[
[
"numpy.zeros",
"numpy.random.shuffle"
]
] |
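The initiative-ordering trick in the record above (a NumPy structured array, shuffled before a stable sort so ties break randomly) can be sketched standalone; the character names and modifiers below are made up:

import numpy as np
from random import randint

def initiative_order(characters):
    # characters: list of (name, initiative_modifier) pairs -- illustrative
    order = np.zeros(len(characters),
                     dtype=[('name', (str, 12)), ('roll', int)])
    for i, (name, mod) in enumerate(characters):
        order[i] = (name, mod + randint(1, 20))  # d20 + modifier
    np.random.shuffle(order)   # randomize first so equal rolls tie-break randomly
    order.sort(order='roll')   # stable sort, ascending by roll
    return order[::-1]         # highest roll acts first

print(initiative_order([('Seth', 10), ('Blue', -2), ('Red', -2)]))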
guqin/xmas2020
|
[
"04ec60b39fcf8df4f84e954c61f8562e8821a98b"
] |
[
"xmas_lights.py"
] |
[
"# some code adapted from DanStach/rpi-ws2811\r\nimport time\r\nimport board\r\nimport neopixel\r\nimport requests\r\nimport blynklib\r\nimport random\r\nimport multiprocessing as mp\r\nimport numpy as np\r\n\r\nBLYNK_AUTH = open('blynk_auth.txt').read().strip()\r\n\r\n# initialize blynk\r\nblynk = blynklib.Blynk(BLYNK_AUTH)\r\n\r\n\r\n\r\n# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18\r\n# NeoPixels must be connected to D10, D12, D18 or D21 to work.\r\npixel_pin = board.D18\r\n\r\n# The number of NeoPixels\r\nnum_pixels = 50\r\n\r\n# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!\r\n# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.\r\nORDER = neopixel.RGB\r\n\r\npixels = neopixel.NeoPixel(\r\n pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER\r\n)\r\n\r\n\r\n\r\n\r\nwait_time = .5\r\nwait_animate = .1\r\ncycleFactor = 1\r\n\r\ncred = (255, 0, 0)\r\ncblue = (0, 0, 255)\r\ncgreen = (0, 255, 0)\r\ncyellow = (255, 255, 0)\r\nccyan = (0, 255, 255)\r\ncpurple = (160, 32, 240)\r\ncpurple2 = (76,0,200)\r\ncpurple3 = (100,0,200)\r\ncorange = (255, 165, 0)\r\ncwhite = (255, 255, 255)\r\ncblk = (0, 0, 0)\r\n\r\n\r\ndef brightnessRGB(red, green, blue, bright):\r\n r = (bright/256.0)*red\r\n g = (bright/256.0)*green\r\n b = (bright/256.0)*blue\r\n return (int(r), int(g), int(b))\r\n \r\n\r\ndef off():\r\n pixels.fill((0, 0, 0))\r\n pixels.show()\r\n\r\ndef red():\r\n pixels.fill((255, 0, 0))\r\n pixels.show()\r\n\r\ndef green():\r\n pixels.fill((0, 255, 0))\r\n pixels.show()\r\n\r\ndef blue():\r\n pixels.fill((0, 0, 255))\r\n pixels.show()\r\n \r\ndef magenta():\r\n pixels.fill((255, 0, 255))\r\n pixels.show()\r\n\r\ndef yellow():\r\n pixels.fill((255, 255, 0))\r\n pixels.show()\r\n\r\ndef cyan():\r\n pixels.fill((0, 255, 255))\r\n pixels.show()\r\n\r\ndef white():\r\n pixels.fill((255, 255, 255))\r\n pixels.show()\r\n\r\ndef wheel(pos):\r\n # Input a value 0 to 255 to get a color value.\r\n # The colours are a transition r - g - b - back to r.\r\n if pos < 0 or pos > 255:\r\n r = g = b = 0\r\n elif pos < 85:\r\n r = int(pos * 3)\r\n g = int(255 - pos * 3)\r\n b = 0\r\n elif pos < 170:\r\n pos -= 85\r\n r = int(255 - pos * 3)\r\n g = 0\r\n b = int(pos * 3)\r\n else:\r\n pos -= 170\r\n r = 0\r\n g = int(pos * 3)\r\n b = int(255 - pos * 3)\r\n return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)\r\n\r\n\r\ndef colorAll5Color(c1, c2, c3, c4, c5):\r\n for i in range(num_pixels):\r\n j = i % 5\r\n if(j == 1): # \r\n pixels[i] = c1\r\n if(j == 2): # \r\n pixels[i] = c2\r\n if(j == 3): # \r\n pixels[i] = c3\r\n if(j == 4): # \r\n pixels[i] = c4\r\n if(j == 0): \r\n pixels[i] = c5\r\n\r\ndef rainbow_cycle(wait=0.001):\r\n while True:\r\n for j in range(255):\r\n for i in range(num_pixels):\r\n pixel_index = (i * 256 // num_pixels) + j\r\n pixels[i] = wheel(pixel_index & 255)\r\n pixels.show()\r\n time.sleep(wait)\r\n\r\ndef knight_rider(wait=0.02):\r\n kernel = [(255,0,0)]*7\r\n while True:\r\n for i in list(range(3,num_pixels-3))+list(reversed(range(3,num_pixels-4))):\r\n pixels.fill((0, 0, 0))\r\n a = max(i-len(kernel)//2,0)\r\n b = min(i+len(kernel)//2+1, num_pixels)\r\n pixels[a:b] = kernel\r\n pixels.show()\r\n time.sleep(wait)\r\n\r\ndef dazzle(wait=0.02):\r\n i = 0\r\n while True:\r\n if i==0:\r\n pixels.fill((255, 0, 0))\r\n pixels.show()\r\n elif i==1:\r\n pixels.fill((0, 255, 0))\r\n pixels.show()\r\n else:\r\n pixels.fill((0, 0, 255))\r\n pixels.show()\r\n i = 
(i+1)%3\r\n time.sleep(wait)\r\n\r\ndef sine(wait=0.04):\r\n a = (np.sin(np.linspace(0,2*np.pi, num_pixels))*127).astype(np.int)\r\n a = a.clip(0,255)\r\n i = 0\r\n while True:\r\n pixels[:] = np.tile(np.roll(a,i),(3,1)).T\r\n pixels.show()\r\n i = (i+1)%num_pixels\r\n time.sleep(wait)\r\n\r\ndef twinkle(wait=0.02):\r\n p = 0.001\r\n maxDelay = int(4/wait)\r\n disableCount = np.zeros(num_pixels).astype(np.int)\r\n a = np.zeros(num_pixels).astype(np.int)\r\n while True:\r\n temp = np.random.rand(num_pixels) < p\r\n disableCount[temp] = (np.random.rand(temp.sum())*maxDelay).astype(np.int)\r\n a[disableCount>0] = 0\r\n a[disableCount==0] = 255\r\n disableCount = np.maximum(disableCount-1,0)\r\n pixels[:] = np.tile(a,(3,1)).T\r\n pixels.show()\r\n time.sleep(wait) \r\n\r\ndef random(wait=0.1):\r\n while True:\r\n pixels[:] = np.random.randint(0,256,(num_pixels,3))\r\n pixels.show()\r\n time.sleep(wait)\r\n\r\n\r\n\r\n# HalloweenExisiting - mimics a heart beat pulse, with 2 beats at different speeds. The existing colors \r\n# on the pixel strip are preserved, rather than a single color.\r\n#\r\n# HalloweenExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay,\r\n# beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat1Delay, cycles):\r\n# HalloweenExisiting(3, .005, .003, 0.001, 6, .002, .003, 0.05, 10)\r\n#\r\n# beat1Step: (1-255) first beat color transition step\r\n# beat1FadeInDelay: (0-2147483647) first beat fade in trasition speed, in seconds\r\n# beat1FadeOutDelay: (0-2147483647) first beat fade out trasition speed, in seconds\r\n# beat1Delay: (0-2147483647) beat time delay bewteen frist and sencond beat, in seconds\r\n# beat2Step: (1-255) second beat color transition step\r\n# beat2FadeInDelay: (0-2147483647) second beat fade in trasition speed, in seconds\r\n# beat2FadeOutDelay: (0-2147483647) second beat fade out trasition speed, in seconds\r\n# beat1Delay: (0-2147483647) beat time delay bewteen sencond and first beat, in seconds\r\n# cycles: (1-2147483647) number of times this effect will run\r\ndef HalloweenExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay, beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat2Delay, cycles):\r\n#HalloweenExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay, \r\n# beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat2Delay, cycles):\r\n # gather existing colors in strip of pixel\r\n stripExisting = []\r\n maxbright = 256\r\n minbright = 15 \r\n for i in range(num_pixels):\r\n stripExisting.append(pixels[i])\r\n\r\n for loop in range(cycles): \r\n\r\n for ii in range(minbright, maxbright, beat1Step): #for ( ii = 1 ; ii <252 ; ii = ii = ii + x)\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat1FadeInDelay)\r\n\r\n for ii in range(maxbright, minbright, -beat1Step): #for (int ii = 252 ; ii > 3 ; ii = ii - x){\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat1FadeOutDelay)\r\n \r\n time.sleep(beat1Delay)\r\n \r\n for ii in range(minbright, maxbright, beat1Step): #for (int ii = 1 ; ii <255 ; ii = ii = ii + y){\r\n for 
index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat2FadeInDelay)\r\n\r\n for ii in range(maxbright, minbright, -beat1Step): #for (int ii = 255 ; ii > 1 ; ii = ii - y){\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat2FadeOutDelay)\r\n \r\n\r\n\r\n# HeartBeatExisiting - mimics a heart beat pulse, with 2 beats at different speeds. The existing colors \r\n# on the pixel strip are preserved, rather than a single color.\r\n#\r\n# HeartBeatExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay,\r\n# beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat1Delay, cycles):\r\n# HeartBeatExisiting(3, .005, .003, 0.001, 6, .002, .003, 0.05, 10)\r\n#\r\n# beat1Step: (1-255) first beat color transition step\r\n# beat1FadeInDelay: (0-2147483647) first beat fade in trasition speed, in seconds\r\n# beat1FadeOutDelay: (0-2147483647) first beat fade out trasition speed, in seconds\r\n# beat1Delay: (0-2147483647) beat time delay bewteen frist and sencond beat, in seconds\r\n# beat2Step: (1-255) second beat color transition step\r\n# beat2FadeInDelay: (0-2147483647) second beat fade in trasition speed, in seconds\r\n# beat2FadeOutDelay: (0-2147483647) second beat fade out trasition speed, in seconds\r\n# beat1Delay: (0-2147483647) beat time delay bewteen sencond and first beat, in seconds\r\n# cycles: (1-2147483647) number of times this effect will run\r\ndef HeartBeatExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay, beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat2Delay, cycles):\r\n#HeartBeatExisiting(beat1Step, beat1FadeInDelay, beat1FadeOutDelay, beat1Delay, \r\n# beat2Step, beat2FadeInDelay, beat2FadeOutDelay, beat2Delay, cycles):\r\n # gather existing colors in strip of pixel\r\n stripExisting = []\r\n maxbright = 220\r\n minbright = 10\r\n for i in range(num_pixels):\r\n stripExisting.append(pixels[i])\r\n\r\n for loop in range(cycles): \r\n\r\n for ii in range(minbright, maxbright, beat1Step): #for ( ii = 1 ; ii <252 ; ii = ii = ii + x)\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat1FadeInDelay)\r\n\r\n for ii in range(maxbright, minbright, -beat1Step): #for (int ii = 252 ; ii > 3 ; ii = ii - x){\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat1FadeOutDelay)\r\n \r\n time.sleep(beat1Delay)\r\n \r\n for ii in range(minbright, maxbright, beat1Step): #for (int ii = 1 ; ii <255 ; ii = ii = ii + y){\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( 
brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat2FadeInDelay)\r\n\r\n for ii in range(maxbright, minbright, -beat1Step): #for (int ii = 255 ; ii > 1 ; ii = ii - y){\r\n for index in range(num_pixels):\r\n r = stripExisting[index][0]\r\n g = stripExisting[index][1]\r\n b = stripExisting[index][2]\r\n pixels[index] = brightnessRGB(r,g,b, ii) \r\n #pixels.fill( brightnessRGB(redo, greeno, blueo, ii) ) #strip.setBrightness(ii)\r\n pixels.show()\r\n time.sleep(beat2FadeOutDelay)\r\n \r\n\r\n\r\ndef CandleOrange(Count, FlickerDelay ):\r\n pixels.fill(corange) # start with orange\r\n for i in range(Count):\r\n flicknum = np.random.randint(0,num_pixels-1)\r\n for k in range(flicknum):\r\n index = np.random.randint(0,num_pixels-1)\r\n blueval = np.random.randint(0,75)\r\n pixels[index] = (255,128 + int(blueval/2),blueval)\r\n pixels.show()\r\n time.sleep(FlickerDelay)\r\n\r\n\r\ndef SnowSparkleExisting(Count, SparkleDelay, SpeedDelay):\r\n # gather existing colors in strip of pixel\r\n stripExisting = []\r\n for i in range(num_pixels):\r\n stripExisting.append(pixels[i])\r\n\r\n for i in range(Count):\r\n blinknum = np.random.randint(1,3)\r\n for k in range(blinknum):\r\n index = np.random.randint(0,num_pixels-1)\r\n pixels[index] = (255,255,255)\r\n pixels.show()\r\n time.sleep(SparkleDelay)\r\n for h in range(num_pixels):\r\n pixels[h] = stripExisting[h]\r\n pixels.show()\r\n speedvar = np.random.randint(1,3)\r\n time.sleep(SpeedDelay*speedvar)\r\n\r\n\r\ndef glow(wait=0.02):\r\n #a = (np.sin(np.linspace(0,2*np.pi, num_pixels))*127).astype(np.int)\r\n #a = a.clip(0,255)\r\n a = np.sin(np.linspace(0,2*np.pi*4, 463))+1\r\n i = 0\r\n j = 0\r\n while True:\r\n #pixels[:] = np.full((num_pixels,3), a[i])\r\n pixels[:] = np.clip((np.array([wheel(j)]*num_pixels)*a[i]).astype(np.int),0,255)\r\n pixels.show()\r\n i = (i+1)%463\r\n j = (j+1)%256\r\n time.sleep(wait)\r\n\r\ndef love_kisses():\r\n\ta = 0.75\r\n\tb = 0.25\r\n\teight = \"---..\"\r\n\twhile True:\r\n\t\tfor j in range(2):\r\n\t\t\tfor i in range(len(eight)):\r\n\t\t\t\tif i <= 2:\r\n\t\t\t\t\tpixels.fill((255, 0, 255))\r\n\t\t\t\t\tpixels.show()\r\n\t\t\t\t\ttime.sleep(a)\r\n\t\t\t\t\tpixels.fill((0, 0, 0))\r\n\t\t\t\t\tpixels.show()\r\n\t\t\t\t\ttime.sleep(b)\r\n\t\t\t\tif i > 2:\r\n\t\t\t\t\tpixels.fill((255, 0, 255))\r\n\t\t\t\t\tpixels.show()\r\n\t\t\t\t\ttime.sleep(b)\r\n\t\t\t\t\tpixels.fill((0, 0, 0))\r\n\t\t\t\t\tpixels.show()\r\n\t\t\t\t\ttime.sleep(b)\r\n\t\ttime.sleep(5)\r\n\r\n\r\ndef sparkle():\r\n print(\"enter spark\")\r\n colorAll5Color((255,0,0), cpurple ,(0,255,0), (0,0,255), (255,128,0 ))\r\n pixels.show()\r\n print(\"enter spark show\")\r\n while True:\r\n SnowSparkleExisting(1000*cycleFactor, .1, .4)\r\n\r\ndef heart():\r\n while True:\r\n pixels.fill(cred) \r\n scale = 10\r\n HeartBeatExisiting(16, .001*scale, .001*scale, 0.001*scale,16*scale, .001*scale, .001*scale, 0.001*scale, 10)\r\n\r\ndef candle():\r\n CandleOrange(1000*cycleFactor, .1)\r\n\r\n \r\ndef stpatrick():\r\n pixels.fill(cgreen) \r\n pixels.show()\r\n while True:\r\n pixels.fill(cgreen) \r\n SnowSparkleExisting(1000*cycleFactor, .1, .4)\r\n\r\ndef halloween():\r\n while True:\r\n pixels.fill(cpurple2) \r\n HalloweenExisiting(1, .002, .002, 0.002,1, .002, .002, 0.002, 10)\r\n\r\n# register handler for virtual pin V11 reading\r\[email protected]_event('read V0')\r\ndef read_virtual_pin_handler(pin):\r\n READ_PRINT_MSG = \"[READ_VIRTUAL_PIN_EVENT] Pin: V{}\"\r\n print(READ_PRINT_MSG.format(pin))\r\n 
#blynk.virtual_write(pin, random.randint(0, 255))\r\n\r\n\r\ncurrentFunc = off\r\ncurrentThread = mp.Process(target=currentFunc)\r\ncurrentThread.start()\r\n\r\[email protected]_event('write V2')\r\ndef write_virtual_pin_handler(pin, value):\r\n print(\"Received write to V2: {}\".format(value))\r\n\r\n# register handler for virtual pin V4 write event\r\[email protected]_event('write V0')\r\ndef write_virtual_pin_handler(pin, value):\r\n global currentFunc\r\n global currentThread\r\n WRITE_EVENT_PRINT_MSG = \"[WRITE_VIRTUAL_PIN_EVENT] Pin: V{} Value: '{}'\"\r\n print(WRITE_EVENT_PRINT_MSG.format(pin, value))\r\n #print(value, type(value))\r\n #print(pixels)\r\n val = int(value[0])\r\n\r\n funcDict = {\r\n 0: off,\r\n 1: red,\r\n 2: green,\r\n 3: blue,\r\n 4: magenta,\r\n 5: yellow,\r\n 6: cyan,\r\n 7: white,\r\n 8: rainbow_cycle,\r\n 9: knight_rider,\r\n 10: dazzle,\r\n 11: sine,\r\n 12: twinkle,\r\n 13: random,\r\n 14: glow,\r\n 15: love_kisses,\r\n 16: sparkle,\r\n 17: candle,\r\n 18: heart,\r\n 19: halloween\r\n }\r\n \r\n currentFunc = funcDict.get(val, off)\r\n print(currentFunc.__name__)\r\n blynk.virtual_write(1, currentFunc.__name__)\r\n currentThread.terminate()\r\n currentThread = mp.Process(target=currentFunc)\r\n currentThread.start()\r\n\r\n###########################################################\r\n# infinite loop that waits for event\r\n###########################################################\r\nwhile True:\r\n blynk.run()\r\n"
] |
[
[
"numpy.maximum",
"numpy.linspace",
"numpy.tile",
"numpy.random.rand",
"numpy.zeros",
"numpy.roll",
"numpy.random.randint"
]
] |
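The animations above need the board/neopixel hardware, but the frame math runs anywhere. A minimal sketch of the sine() effect with the hardware calls stripped out (note the original maps the sine wave to [-127, 127] and clips, so the negative half of the strip sits at zero):

import numpy as np

NUM_PIXELS = 50  # matches num_pixels in the script

def sine_frame(step):
    a = (np.sin(np.linspace(0, 2 * np.pi, NUM_PIXELS)) * 127).astype(int)
    a = a.clip(0, 255)                           # negative half-wave clips to 0
    return np.tile(np.roll(a, step), (3, 1)).T   # same value on R, G, B channels

frame = sine_frame(step=7)
print(frame.shape)  # (50, 3): one RGB triple per pixel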
hypnaceae/FastSpeech2-Puppetry
|
[
"a7473998a6e0d455fbc7c77549294a79e9c29d5f"
] |
[
"utils/model.py"
] |
[
"import os\nimport json\n\nimport torch\nimport numpy as np\n\nimport hifigan\nfrom model import FastSpeech2, ScheduledOptim\n\n\ndef get_model(args, configs, device, train=False):\n (preprocess_config, model_config, train_config) = configs\n\n model = FastSpeech2(preprocess_config, model_config).to(device)\n if args.restore_step:\n ckpt_path = os.path.join(\n train_config[\"path\"][\"ckpt_path\"],\n \"{}.pth.tar\".format(args.restore_step),\n )\n ckpt = torch.load(ckpt_path)\n model.load_state_dict(ckpt[\"model\"])\n\n if train:\n scheduled_optim = ScheduledOptim(\n model, train_config, model_config, args.restore_step\n )\n if args.restore_step:\n scheduled_optim.load_state_dict(ckpt[\"optimizer\"])\n model.train()\n return model, scheduled_optim\n\n model.eval()\n model.requires_grad_ = False\n return model\n\n\ndef get_param_num(model):\n num_param = sum(param.numel() for param in model.parameters())\n return num_param\n\n\ndef get_vocoder(config, device):\n name = config[\"vocoder\"][\"model\"]\n speaker = config[\"vocoder\"][\"speaker\"]\n\n if name == \"MelGAN\":\n if speaker == \"LJSpeech\":\n vocoder = torch.hub.load(\n \"descriptinc/melgan-neurips\", \"load_melgan\", \"linda_johnson\"\n )\n elif speaker == \"universal\":\n vocoder = torch.hub.load(\n \"descriptinc/melgan-neurips\", \"load_melgan\", \"multi_speaker\"\n )\n vocoder.mel2wav.eval()\n vocoder.mel2wav.to(device)\n elif name == \"HiFi-GAN\":\n with open(\"hifigan/config.json\", \"r\") as f:\n config = json.load(f)\n config = hifigan.AttrDict(config)\n vocoder = hifigan.Generator(config)\n if speaker == \"LJSpeech\":\n ckpt = torch.load(\"hifigan/generator_LJSpeech.pth.tar\", map_location=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n elif speaker == \"universal\":\n ckpt = torch.load(\"hifigan/generator_universal.pth.tar\", map_location=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n vocoder.load_state_dict(ckpt[\"generator\"])\n vocoder.eval()\n vocoder.remove_weight_norm()\n vocoder.to(device)\n\n return vocoder\n\n\ndef vocoder_infer(mels, vocoder, model_config, preprocess_config, lengths=None):\n name = model_config[\"vocoder\"][\"model\"]\n with torch.no_grad():\n if name == \"MelGAN\":\n wavs = vocoder.inverse(mels / np.log(10))\n elif name == \"HiFi-GAN\":\n wavs = vocoder(mels).squeeze(1)\n\n wavs = (\n wavs.cpu().numpy()\n * preprocess_config[\"preprocessing\"][\"audio\"][\"max_wav_value\"]\n ).astype(\"int16\")\n wavs = [wav for wav in wavs]\n\n for i in range(len(mels)):\n if lengths is not None:\n wavs[i] = wavs[i][: lengths[i]]\n\n return wavs\n"
] |
[
[
"numpy.log",
"torch.load",
"torch.no_grad",
"torch.cuda.is_available",
"torch.hub.load"
]
] |
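A self-contained sketch of the checkpoint round-trip that get_model() above relies on: a dict carrying both "model" and "optimizer" state. The tiny Linear module and the file name are stand-ins, not FastSpeech2.

import torch

model = torch.nn.Linear(4, 2)
optim = torch.optim.Adam(model.parameters())

ckpt_path = "demo_ckpt.pth.tar"  # illustrative path
torch.save({"model": model.state_dict(), "optimizer": optim.state_dict()},
           ckpt_path)

ckpt = torch.load(ckpt_path)          # same keys get_model() expects
model.load_state_dict(ckpt["model"])
optim.load_state_dict(ckpt["optimizer"])
print(sum(p.numel() for p in model.parameters()))  # cf. get_param_num(): 10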
zhupengyang/Paddle
|
[
"c560a7d57aad990f374ebadd330351f18e2ca65f"
] |
[
"python/paddle/fluid/tests/unittests/test_pool1d_api.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler, Program, program_guard\nimport paddle\nimport paddle.nn.functional as F\nimport paddle.fluid as fluid\n\n\ndef adaptive_start_index(index, input_size, output_size):\n return int(np.floor(index * input_size / output_size))\n\n\ndef adaptive_end_index(index, input_size, output_size):\n return int(np.ceil((index + 1) * input_size / output_size))\n\n\ndef max_pool1D_forward_naive(x,\n ksize,\n strides,\n paddings,\n global_pool=0,\n ceil_mode=False,\n exclusive=False,\n adaptive=False,\n data_type=np.float64):\n N, C, L = x.shape\n if global_pool == 1:\n ksize = [L]\n if adaptive:\n L_out = ksize[0]\n else:\n L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - 1\n ) // strides[0] + 1 if ceil_mode else (\n L - ksize[0] + 2 * paddings[0]) // strides[0] + 1\n\n out = np.zeros((N, C, L_out))\n for i in range(L_out):\n if adaptive:\n r_start = adaptive_start_index(i, L, ksize[0])\n r_end = adaptive_end_index(i, L, ksize[0])\n else:\n r_start = np.max((i * strides[0] - paddings[0], 0))\n r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L))\n x_masked = x[:, :, r_start:r_end]\n\n out[:, :, i] = np.max(x_masked, axis=(2))\n return out\n\n\ndef avg_pool1D_forward_naive(x,\n ksize,\n strides,\n paddings,\n global_pool=0,\n ceil_mode=False,\n exclusive=False,\n adaptive=False,\n data_type=np.float64):\n N, C, L = x.shape\n if global_pool == 1:\n ksize = [L]\n if adaptive:\n L_out = ksize[0]\n else:\n L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - 1\n ) // strides[0] + 1 if ceil_mode else (\n L - ksize[0] + 2 * paddings[0]) // strides[0] + 1\n\n out = np.zeros((N, C, L_out))\n for i in range(L_out):\n if adaptive:\n r_start = adaptive_start_index(i, L, ksize[0])\n r_end = adaptive_end_index(i, L, ksize[0])\n else:\n r_start = np.max((i * strides[0] - paddings[0], 0))\n r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L))\n x_masked = x[:, :, r_start:r_end]\n\n field_size = (r_end - r_start) \\\n if (exclusive or adaptive) else (ksize[0])\n if data_type == np.int8 or data_type == np.uint8:\n out[:, :, i] = (np.rint(\n np.sum(x_masked, axis=(2, 3)) / field_size)).astype(data_type)\n else:\n out[:, :, i] = (np.sum(x_masked, axis=(2)) /\n field_size).astype(data_type)\n return out\n\n\nclass TestPool1d_API(unittest.TestCase):\n def setUp(self):\n np.random.seed(123)\n self.places = [fluid.CPUPlace()]\n if core.is_compiled_with_cuda():\n self.places.append(fluid.CUDAPlace(0))\n\n def check_avg_static_results(self, place):\n with fluid.program_guard(fluid.Program(), fluid.Program()):\n input = fluid.data(name=\"input\", shape=[2, 3, 32], dtype=\"float32\")\n result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=0)\n\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n 
result_np = avg_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0], ceil_mode=False)\n\n exe = fluid.Executor(place)\n fetches = exe.run(fluid.default_main_program(),\n feed={\"input\": input_np},\n fetch_list=[result])\n self.assertTrue(np.allclose(fetches[0], result_np))\n\n def check_avg_dygraph_results(self, place):\n with fluid.dygraph.guard(place):\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n input = fluid.dygraph.to_variable(input_np)\n result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=[0])\n\n result_np = avg_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0])\n\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n avg_pool1d_dg = paddle.nn.layer.AvgPool1d(\n kernel_size=2, stride=None, padding=0)\n result = avg_pool1d_dg(input)\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n def check_max_static_results(self, place):\n with fluid.program_guard(fluid.Program(), fluid.Program()):\n input = fluid.data(name=\"input\", shape=[2, 3, 32], dtype=\"float32\")\n result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0])\n\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n result_np = max_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0])\n\n exe = fluid.Executor(place)\n fetches = exe.run(fluid.default_main_program(),\n feed={\"input\": input_np},\n fetch_list=[result])\n self.assertTrue(np.allclose(fetches[0], result_np))\n\n def check_max_dygraph_results(self, place):\n with fluid.dygraph.guard(place):\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n input = fluid.dygraph.to_variable(input_np)\n result = F.max_pool1d(input, kernel_size=2, stride=2, padding=0)\n\n result_np = max_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0])\n\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n max_pool1d_dg = paddle.nn.layer.MaxPool1d(\n kernel_size=2, stride=None, padding=0)\n result = max_pool1d_dg(input)\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n def check_max_dygraph_padding_same(self, place):\n with fluid.dygraph.guard(place):\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n input = fluid.dygraph.to_variable(input_np)\n result = F.max_pool1d(\n input, kernel_size=2, stride=2, padding=\"SAME\")\n\n result_np = max_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0])\n\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n def check_avg_dygraph_padding_same(self, place):\n with fluid.dygraph.guard(place):\n input_np = np.random.random([2, 3, 32]).astype(\"float32\")\n input = fluid.dygraph.to_variable(input_np)\n result = F.avg_pool1d(\n input, kernel_size=2, stride=2, padding=\"SAME\")\n\n result_np = avg_pool1D_forward_naive(\n input_np, ksize=[2], strides=[2], paddings=[0])\n\n self.assertTrue(np.allclose(result.numpy(), result_np))\n\n def test_pool1d(self):\n for place in self.places:\n\n self.check_max_dygraph_results(place)\n self.check_avg_dygraph_results(place)\n self.check_max_static_results(place)\n self.check_avg_static_results(place)\n self.check_max_dygraph_padding_same(place)\n self.check_avg_dygraph_padding_same(place)\n\n\nclass TestPool2dError_API(unittest.TestCase):\n def test_error_api(self):\n def run1():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = [[2]]\n res_pd = F.max_pool1d(\n input_pd, kernel_size=2, stride=2, 
padding=padding)\n\n self.assertRaises(ValueError, run1)\n\n def run2():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = [[2]]\n res_pd = F.max_pool1d(\n input_pd, kernel_size=2, stride=2, padding=padding)\n\n self.assertRaises(ValueError, run2)\n\n def run3():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = \"padding\"\n res_pd = F.max_pool1d(\n input_pd, kernel_size=2, stride=2, padding=padding)\n\n self.assertRaises(ValueError, run3)\n\n def run4():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = \"VALID\"\n res_pd = F.max_pool1d(\n input_pd,\n kernel_size=2,\n stride=2,\n padding=padding,\n ceil_mode=True)\n\n self.assertRaises(ValueError, run4)\n\n def run5():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = \"VALID\"\n res_pd = F.max_pool1d(\n input_pd,\n kernel_size=2,\n stride=2,\n padding=padding,\n ceil_mode=True)\n\n self.assertRaises(ValueError, run5)\n\n def run6():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = \"VALID\"\n res_pd = F.avg_pool1d(\n input_pd,\n kernel_size=2,\n stride=2,\n padding=padding,\n ceil_mode=True)\n\n self.assertRaises(ValueError, run6)\n\n def run7():\n with fluid.dygraph.guard():\n input_np = np.random.uniform(-1, 1,\n [2, 3, 32]).astype(np.float32)\n input_pd = fluid.dygraph.to_variable(input_np)\n padding = \"paddle\"\n res_pd = F.avg_pool1d(\n input_pd,\n kernel_size=2,\n stride=2,\n padding=padding,\n ceil_mode=True)\n\n self.assertRaises(ValueError, run7)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed",
"numpy.min",
"numpy.ceil",
"numpy.max",
"numpy.floor",
"numpy.random.uniform",
"numpy.zeros",
"numpy.sum"
]
] |
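A hedged sketch of the naive pooling reference used by the tests above, trimmed to the non-adaptive, floor-mode max case; the [2, 3, 32] shape mirrors the test fixtures:

import numpy as np

def max_pool1d_naive(x, ksize, stride, pad):
    N, C, L = x.shape
    L_out = (L - ksize + 2 * pad) // stride + 1  # ceil_mode=False
    out = np.zeros((N, C, L_out))
    for i in range(L_out):
        r_start = max(i * stride - pad, 0)
        r_end = min(i * stride + ksize - pad, L)
        out[:, :, i] = x[:, :, r_start:r_end].max(axis=2)
    return out

x = np.random.random((2, 3, 32)).astype("float32")
print(max_pool1d_naive(x, ksize=2, stride=2, pad=0).shape)  # (2, 3, 16)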
Mustrumion/MastersDegreeProjects
|
[
"5d389dbcb80d91888b7d4d38034ec9d33e496fba"
] |
[
"Visualisation/annealingColumnChartGrid.py"
] |
[
"import glob\nimport json\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport math\nimport os\nfrom matplotlib import rcParams\n# rcParams.update({'figure.autolayout': True})\n\ndef get_hours(time_str : str) -> float:\n \"\"\"Get hours from time.\"\"\"\n h, m, s = time_str.split(':')\n s = s.split('.')[0]\n return int(h) + int(m) / 60 + int(s) / 3600\n\ndef get_minutes(time_str : str) -> float:\n \"\"\"Get hours from time.\"\"\"\n h, m, s = time_str.split(':')\n s = s.split('.')[0]\n return int(h) * 60 + int(m) + int(s) / 60\n\n\ndef get_difficulty_from_path(path: str) -> int:\n if \"extreme\" in path:\n return 6\n if \"hard\" in path:\n return 5\n if \"medium\" in path:\n return 4\n if \"\\\\easy\\\\\" in path:\n return 3\n if \"very_easy\\\\\" in path:\n return 2\n if \"trivial\"in path:\n return 1\n\ndef get_length_from_path(path: str) -> str:\n if \"week\" in path:\n return \"week\"\n if \"month\" in path:\n return \"month\"\n\ndef get_channels_from_path(path: str):\n segments = os.path.split(path)\n char = os.path.split(segments[0])[1][0]\n return int(char) \n\n\ndirectory = 'C:/Users/Mustrum/Dropbox/MDP/solutions/annealing_best_tests_all2/'\nfileMask = '**/0*.json'\nfiles = [f for f in glob.glob(directory + fileMask, recursive=True)]\n\ncolumns=['name', 'difficulty', 'horizon', 'channel count', 'accept. function', 'accept. f. % improv.', 'acceptable %', 'loss [x1000]', 'loss % improvement', 'time [min]', 'iterations [x100 000]']\ndf = pd.DataFrame(columns = columns)\noutliers = pd.DataFrame(columns = columns)\n\nfor i, f in enumerate(files):\n with open(f) as json_file:\n pathSegments = os.path.split(f)\n fileName = os.path.splitext(pathSegments[1])[0]\n parentDir = os.path.split(pathSegments[0])[1]\n name = parentDir + \"/\" + fileName\n\n data = json.load(json_file)\n loss = data['WeightedLoss'] / 1000\n lossPercent = (data['WeightedLossBefore'] - data['WeightedLoss']) / data['WeightedLossBefore'] * 100\n accetability = data['IntegrityLossScore']\n if data['IntegrityLossScoreBefore'] == 0:\n accetabilityPercent = float('nan')\n else:\n accetabilityPercent = (data['IntegrityLossScoreBefore'] - data['IntegrityLossScore']) / data['IntegrityLossScoreBefore'] * 100\n accetabilePercent = (data['IntegrityLossScore'] == 0) * 100.0\n time = get_minutes(data['LastTimeElapsed'])\n iterations = data['NumberOfIterations'] / 100000\n\n difficulty = get_difficulty_from_path(f)\n horizon = get_length_from_path(f)\n channels = get_channels_from_path(f)\n\n if time > 59.99:\n print(f)\n outliers.loc[i] = [name, difficulty, horizon, channels,\n accetability, accetabilityPercent, accetabilePercent, loss, lossPercent, time, iterations]\n else:\n df.loc[i] = [name, difficulty, horizon, channels,\n accetability, accetabilityPercent, accetabilePercent, loss, lossPercent, time, iterations]\n\ndf[columns[1:4]] = df[columns[1:4]].astype(str) \ndf[columns[4:]] = df[columns[4:]].apply(pd.to_numeric)\n# df = df.apply(pd.to_numeric)\ndf.sort_values(by=['horizon', 'name'], inplace=True, ascending=[False, True])\noutliers.sort_values(by=['horizon', 'name'], inplace=True, ascending=[False, True])\ndf.to_csv (directory + 'plottedSolutionsData.csv', index = None, header=True)\noutliers[['loss [x1000]', 'loss % improvement', 'iterations [x100 000]']]= outliers[['loss [x1000]', 'loss % improvement', 'iterations [x100 000]']].round(3)\noutliers.to_csv (directory + 'outliersSolutionsData.csv', index = None, header=True)\nprint(df)\ng = sns.PairGrid(df, 
x_vars=columns[1:4], y_vars=columns[4:])\ng = g.map(sns.barplot)\nplt.subplots_adjust(top=0.95, bottom=0.05)\ng.fig.suptitle(\"Simulated annealing results categorized by the instances\")\n# plt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"pandas.DataFrame",
"matplotlib.pyplot.subplots_adjust"
]
] |
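The timing column above comes from strings like '01:30:30.500'; a hedged sketch of the parsing helper with a quick check (fractional seconds are truncated, as in the original):

def get_minutes(time_str: str) -> float:
    """Convert 'HH:MM:SS[.ffffff]' to minutes."""
    h, m, s = time_str.split(':')
    s = s.split('.')[0]  # drop fractional seconds
    return int(h) * 60 + int(m) + int(s) / 60

print(get_minutes('01:30:30.500'))  # 90.5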
swagner648/rainbownums
|
[
"ec801b13565cdc92aa43b7ca03cdfd2a09b08ef9"
] |
[
"rainbownum/RainbowCartesianSumsSim.py"
] |
[
"from .RainbowSim import RainbowSim\nimport numpy as np\n\n\nclass RbCartesianSumsEq(RainbowSim):\n def __init__(self, m, n, a, b=[0, 0]):\n super(RbCartesianSumsEq, self).__init__(m * n, a, Point(b[0], b[1]), False)\n self.M = m # rows\n self.N = n # columns\n self.sums = self.sets\n self.__generate_sums()\n\n def get_equation(self):\n eq = \"\"\n for i in self.a:\n eq += str(i) + \"x + \"\n eq = eq[:-2] + \"= \" + str(self.b) + \", M = \" + str(self.M) + \", N =\" + str(self.N)\n return eq\n\n def __generate_sums(self):\n values = list(range(1, self.k))\n self.__recur_gen_sums(values, 0)\n\n def __recur_gen_sums(self, values, loop):\n k = self.k - 1\n if loop == k:\n return\n stop_case = self.n - k + loop\n while values[loop] <= stop_case:\n self.__recur_gen_sums(values, loop + 1)\n if loop == k - 1:\n sum = Point(0, 0)\n out = [0 for _ in range(self.k)]\n for i in range(len(values)):\n sum = sum + self.a[i] * self.__translate(values[i])\n out[i] = values[i]\n valid = True\n sum = (sum - self.b) / -self.a[k]\n if sum.x == int(sum.x) and sum.y == int(sum.y) and sum.x <= self.M and sum.y <= self.N:\n out[k] = self.__point_to_i(sum)\n valid = self._set_leq_n(out, valid)\n valid = self._is_distinct(out, valid)\n out = self._decrement_if_not_mod(out, valid)\n self._add_set(out, valid)\n values[loop] = values[loop] + 1\n for lp in range(loop + 1, k):\n values[lp] = values[lp - 1] + 1\n\n def __translate(self, n):\n x = (n + self.N - 1) // self.N\n y = 1 + ((n - 1) % self.N)\n return Point(x, y)\n\n def __point_to_i(self, p):\n return self.N * (p.x - 1) + p.y\n\n def print_extreme_matrices(self, quantity=-1):\n if self.start != -1:\n temp = self.colorings.head\n while temp.next is not None:\n matrix = [temp.data[i * self.N:(i + 1) * self.N] for i in range((len(temp.data) + self.N - 1) // self.N )]\n print(np.matrix(matrix), \"\\n\")\n temp = temp.next\n print()\n\n def print_set_matrices(self):\n sum = self.sums[self.n].head.next\n while sum.next is not None:\n matrix = [[\"*\" for _ in range(self.N)] for _ in range(self.M)]\n for i in sum.data:\n p = self.__translate(i + 1)\n matrix[p.x - 1][p.y - 1] = \"0\"\n print(np.matrix(matrix), \"\\n\")\n sum = sum.next\n return\n\n def print_sets(self, nums=-1):\n print('Sets Generated:', end='')\n if nums is -1 and self.mod:\n nums = list(range(self.n))\n elif nums is -1 and not self.mod:\n nums = list(range(1, self.n + 1))\n for n in nums:\n if self.mod:\n temp = self.sets[n].head.next\n else:\n temp = self.sets[n - 1].head.next\n if self.mod:\n print('\\n', n, ':', temp, end='')\n else:\n if temp is not None:\n print('\\n', n, ':',\n '[%s]' % ', '.join(map(str, [self.__translate(i + 1) for i in temp.data])), end='')\n else:\n print('\\n', n, ':', temp, end='')\n if temp is not None:\n temp = temp.next\n while temp is not None:\n if self.mod:\n print(',', temp, end='')\n else:\n print(',', '[%s]' % ', '.join(map(str, [self.__translate(i + 1) for i in temp.data])),\n end='')\n temp = temp.next\n print(\"\\n\")\n\n\nclass Point:\n def __init__(self, x, y):\n if int(x) != x:\n raise TypeError(\"Points cannot have parameter of type double: x\")\n if int(y) != y:\n raise TypeError(\"Points cannot have parameter of type double: y\")\n self.x = int(x)\n self.y = int(y)\n\n def __add__(self, other):\n return Point(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Point(self.x - other.x, self.y - other.y)\n\n def __rmul__(self, other):\n return Point(other * self.x, other * self.y)\n\n def __truediv__(self, other):\n return 
Point(self.x / other, self.y / other)\n\n def __str__(self):\n return \"[\" + str(self.x) + \", \" + str(self.y) + \"]\"\n"
] |
[
[
"numpy.matrix"
]
] |
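A trimmed sketch of the integer lattice Point from the record and the 1-based index-to-grid translation used by RbCartesianSumsEq; N below is an illustrative column count:

class Point:
    def __init__(self, x, y):
        if int(x) != x or int(y) != y:
            raise TypeError("Point coordinates must be integral")
        self.x, self.y = int(x), int(y)

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __rmul__(self, c):  # supports scalar * Point, as in the sums loop
        return Point(c * self.x, c * self.y)

    def __str__(self):
        return "[%d, %d]" % (self.x, self.y)

N = 4  # illustrative number of grid columns

def translate(n):  # 1-based linear index -> grid point
    return Point((n + N - 1) // N, 1 + (n - 1) % N)

print(translate(6), 3 * translate(6))  # [2, 2] [6, 6]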
SCGTall/CS6375MachineLearning
|
[
"50ff7348eb583113b17084775a8679ce135c0487"
] |
[
"Assignment/Assignment 1/Assignment1.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n# The true function\ndef f_true(x: float) -> float:\n return 6.0 * (np.sin(x + 2) + np.sin(2 * x + 4))\n\n# Get data points\n#np.random.seed(1) # This can make the result repeatable during debugging\nn = 750 # Number of data points\nx = np.random.uniform(-7.5, 7.5, n) # Training examples, in one dimension\ne = np.random.normal(0.0, 5.0, n) # Random Gaussian noise\ny = f_true(x) + e\n\nplt.figure()\nplt.title('Plot the Data')\n# Plot the data\nplt.scatter(x, y, 12, marker = 'o')\n\n# Plot the true function, which is really \"unknown\"\nx_true = np.arange(-7.5, 7.5, 0.05)\ny_true = f_true(x_true)\nplt.plot(x_true, y_true, marker = 'None', color = 'r')\n\n# scikit-learn has many tools and utilities for model selection\n# Use the fraction in code regradless of document's words\ntst_frac = 0.3 # Fraction of examples to sample for the test set\nval_frac = 0.1 # Fraction of examples to sample for the validation set\n\n# Use the same seed = 42\n# First, we use train_test_split to partition (x, y) into training and test sets\nx_trn, x_tst, y_trn, y_tst = train_test_split(x, y, test_size = tst_frac, random_state = 42)\n\n# Next, we use train_test_split to further partition (x_trn, ytrn) into training and validation sets\nx_trn, x_val, y_trn, y_val = train_test_split(x_trn, y_trn, test_size = val_frac, random_state = 42)\n\n# Plot the three subsets\nplt.figure()\nplt.title('Plot Three Subsets')\nplt.scatter(x_trn, y_trn, 12, marker = 'o', color = 'orange')\nplt.scatter(x_val, y_val, 12, marker = 'o', color = 'green')\nplt.scatter(x_tst, y_tst, 12, marker = 'o', color = 'blue')\n\n# Polynomial Basis Functions\ndef polynomial_transform(x: np.array(float), d: int) -> np.array(float):\n # *** Insert your code here ***\n return np.array([np.logspace(0, d, d+1, base=i) for i in x])\n\ndef train_model(phi: np.array(float), y: np.array(float)) -> np.array(float):\n # *** Insert your code here ***\n return np.linalg.inv(phi.transpose().dot(phi)).dot(phi.transpose()).dot(y)\n\ndef evaluate_model(phi: np.array(float), y: np.array(float), w: np.array(float)) -> float:\n # *** Insert your code here ***\n return np.mean(np.power((y - np.array([w.dot(i) for i in phi])), 2))\n\nw = {} # Dictionary to store all the trained models\nvalidationErr = {} # Validation error of the models\ntestErr = {} # Test error of all the models\n\nfor d in range(3, 25, 3): # Iterate over polynomial degree\n phi_trn = polynomial_transform(x_trn, d) # Transform training data into d dimensions\n w[d] = train_model(phi_trn, y_trn) # Learn model on training data\n phi_val = polynomial_transform(x_val, d) # Transform validation data into d dimensions\n validationErr[d] = evaluate_model(phi_val, y_val, w[d]) # Evaluate model on validation data\n phi_tst = polynomial_transform(x_tst, d) # Transform test data into d dimensions\n testErr[d] = evaluate_model(phi_tst, y_tst, w[d]) # Evaluate model on test data\n\n# Plot all the models\nplt.figure()\nplt.title('Analysis of d of Linear Basis Case')\nplt.plot(list(validationErr.keys()), list(validationErr.values()), marker = 'o', linewidth = 3, markersize = 12)\nplt.plot(list(testErr.keys()), list(testErr.values()), marker = 's', linewidth = 3, markersize = 12)\nplt.xlabel(\"Ploynomial degree\", fontsize = 16)\nplt.ylabel(\"Validation/Test error\", fontsize = 16)\nplt.xticks(list(validationErr.keys()), fontsize = 12)\nplt.legend(['Validation Error', 'Test Error'], 
fontsize = 16)\nplt.axis([2, 25, 15, 65])\n\nplt.figure()\nplt.title('Visualize All Learned Models of Linear Basis Case')\nplt.plot(x_true, y_true, marker = 'None', linewidth = 5, color = 'k')\n\nfor d in range(9, 25, 3):\n x_d = polynomial_transform(x_true, d)\n y_d = x_d @ w[d]\n plt.plot(x_true, y_d, marker = 'None', linewidth = 2)\n\nplt.legend(['true'] + list(range(9, 25, 3)))\nplt.axis([-8, 8, -15, 15])\n\n# Radial Basis Functions\ndef radial_basis_transform(x: np.array(float), b: np.array(float), gamma: float = 0.1) -> np.array(float):\n # *** Insert your code here ***\n return np.array([[(np.e ** (-gamma * ((xi - xj) ** 2))) for xj in b] for xi in x])\n\ndef train_ridge_model(phi: np.array(float), y: np.array(float), lam: float) -> np.array(float):\n # *** Insert your code here ***\n return np.linalg.inv(phi.transpose().dot(phi) + lam * np.eye(len(phi))).dot(phi.transpose()).dot(y)\n\n# *** Insert your code here ***\nw2 = {} # Dictionary to store all the trained models for radial basis\nvalidationErr2 = {} # Validation error of the models for radial basis\ntestErr2 = {} # Test error of all the models for radial basis\n\nphi_trn2 = radial_basis_transform(x_trn, x_trn) # Transform training data into n dimensions\nphi_val2 = radial_basis_transform(x_val, x_trn) # Transform validation data into n dimensions\nphi_tst2 = radial_basis_transform(x_tst, x_trn) # Transform test data into n dimensions\n\nfor l in range(-3, 4): # Iterate over l\n lam = 10 ** l # Get lambda from l\n w2[l] = train_ridge_model(phi_trn2, y_trn, lam) # Learn model on training data\n validationErr2[l] = evaluate_model(phi_val2, y_val, w2[l]) # Evaluate model on validation data\n testErr2[l] = evaluate_model(phi_tst2, y_tst, w2[l]) # Evaluate model on test data\n\n# Plot all the models\nplt.figure()\nplt.title('Analysis of lambda of Radial Basis Case')\nplt.plot(list(validationErr2.keys()), list(validationErr2.values()), marker = 'o', linewidth = 3, markersize = 12)\nplt.plot(list(testErr2.keys()), list(testErr2.values()), marker = 's', linewidth = 3, markersize = 12)\nplt.xlabel(\"Radial degree\", fontsize = 16)\nplt.ylabel(\"Validation/Test error\", fontsize = 16)\nplt.xticks(list(validationErr2.keys()), [10 ** i for i in list(validationErr2.keys())], fontsize = 12)\nplt.legend(['Validation Error', 'Test Error'], fontsize = 16)\nplt.axis([-4, 4, 15, 65])\n\nplt.figure()\nplt.title('Visualize All Learned Models of Radial Basis Case')\nplt.plot(x_true, y_true, marker = 'None', linewidth = 5, color = 'k')\n\nfor l in range(-3, 4):\n x_d = radial_basis_transform(x_true, x_trn)\n y_d = x_d @ w2[l]\n plt.plot(x_true, y_d, marker = 'None', linewidth = 2)\n\nplt.legend(['true'] + list(np.logspace(-3, 3, 7)))\nplt.axis([-8, 8, -15, 15])\n\n# Wait to check results\nplt.show() # Image will not show without this code"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.logspace",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axis",
"numpy.random.uniform",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
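The logspace trick above builds the Vandermonde matrix [x^0, ..., x^d] row by row; a hedged sketch that checks it against np.vander and runs the closed-form least-squares fit on a toy line (np.linalg.lstsq would be the numerically safer choice for large d):

import numpy as np

def polynomial_transform(x, d):
    # logspace(0, d, d+1, base=i) == [i**0, i**1, ..., i**d]
    return np.array([np.logspace(0, d, d + 1, base=i) for i in x])

assert np.allclose(polynomial_transform(np.array([2.0, 3.0]), 3),
                   np.vander(np.array([2.0, 3.0]), 4, increasing=True))

def train_model(Phi, y):
    # closed-form OLS: w = (Phi^T Phi)^{-1} Phi^T y
    return np.linalg.inv(Phi.T @ Phi) @ Phi.T @ y

Phi = polynomial_transform(np.array([0.0, 1.0, 2.0]), 1)
print(train_model(Phi, np.array([1.0, 3.0, 5.0])))  # ~[1. 2.] for y = 1 + 2x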
pbaiz/openrec
|
[
"a00de2345844858194ef43ab6845342114a5be93",
"a00de2345844858194ef43ab6845342114a5be93",
"a00de2345844858194ef43ab6845342114a5be93"
] |
[
"openrec/tf1/utils/samplers/random_pointwise_sampler.py",
"openrec/tf2/recommenders/dlrm.py",
"openrec/tf1/utils/evaluators/precision.py"
] |
[
"import numpy as np\nimport random\nfrom openrec.tf1.utils.samplers import Sampler\n\ndef RandomPointwiseSampler(dataset, batch_size, num_process=5, seed=100):\n \n random.seed(seed)\n def batch(dataset, batch_size=batch_size):\n \n while True:\n input_npy = np.zeros(batch_size, dtype=[('user_id', np.int32),\n ('item_id', np.int32),\n ('label', np.float32)])\n \n for ind in range(batch_size):\n user_id = random.randint(0, dataset.total_users()-1)\n item_id = random.randint(0, dataset.total_items()-1)\n label = 1.0 if dataset.is_positive(user_id, item_id) else 0.0\n input_npy[ind] = (user_id, item_id, label)\n yield input_npy\n \n s = Sampler(dataset=dataset, generate_batch=batch, num_process=num_process)\n \n return s",
"import sys\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom openrec.tf2.modules import LatentFactor, SecondOrderFeatureInteraction, MLP\n\nclass DLRM(Model):\n \n def __init__(\n self, \n m_spa,\n ln_emb,\n ln_bot,\n ln_top,\n arch_interaction_op='dot',\n arch_interaction_itself=False,\n sigmoid_bot=False,\n sigmoid_top=True,\n loss_func='mse',\n loss_threshold=0.0):\n \n '''\n m_spa: the dimensionality of sparse feature embeddings\n ln_emb: the size of sparse feature embeddings (num_instances)\n ln_bot: the size of the bottom MLP\n ln_top: the size of the top MLP\n '''\n \n super(DLRM, self).__init__()\n \n self._loss_threshold = loss_threshold\n self._loss_func = loss_func\n self._latent_factors = [LatentFactor(num_instances=num, \n dim=m_spa) for num in ln_emb]\n self._mlp_bot = MLP(units_list=ln_bot, \n out_activation='sigmoid' if sigmoid_bot else 'relu')\n self._mlp_top = MLP(units_list=ln_top, \n out_activation='sigmoid' if sigmoid_top else 'relu')\n \n self._dot_interaction = None\n if arch_interaction_op == 'dot':\n self._dot_interaction = SecondOrderFeatureInteraction(\n self_interaction=arch_interaction_itself\n )\n \n elif self._arch_interaction_op != 'cat':\n sys.exit(\n \"ERROR: arch_interaction_op=\"\n + self._arch_interaction_op\n + \" is not supported\"\n )\n \n if loss_func == 'mse':\n self._loss = tf.keras.losses.MeanSquaredError()\n elif loss_func == 'bce':\n self._loss = tf.keras.losses.BinaryCrossentropy()\n else:\n sys.exit(\n \"ERROR: loss_func=\"\n + loss_func\n + \" is not supported\"\n )\n \n def call(self, dense_features, sparse_features, label):\n \n '''\n dense_features shape: [batch_size, num of dense features]\n sparse_features shape: [batch_size, num_of_sparse_features]\n label shape: [batch_size]\n '''\n \n prediction = self.inference(dense_features, sparse_features)\n loss = self._loss(y_true=label, \n y_pred=prediction)\n return loss\n \n def inference(self, dense_features, sparse_features):\n \n '''\n dense_features shape: [batch_size, num of dense features]\n sparse_features shape: [num_of_sparse_features, batch_size]\n '''\n \n sparse_emb_vecs = list(map(lambda pair: pair[1](pair[0]), \n zip(tf.unstack(sparse_features, axis=1), \n self._latent_factors)))\n \n dense_emb_vec = self._mlp_bot(dense_features)\n \n if self._dot_interaction is not None:\n prediction = self._mlp_top(tf.concat([dense_emb_vec, \n self._dot_interaction(sparse_emb_vecs + [dense_emb_vec])],\n axis=1))\n else:\n prediction = self._mlp_top(tf.concat(sparse_emb_vecs + [dense_emb_vec], \n axis=1))\n \n if 0.0 < self._loss_threshold and self._loss_threshold < 1.0:\n prediction = tf.clip_by_value(prediction, self._loss_threshold, 1.0 - self._loss_threshold)\n \n return tf.reshape(prediction, [-1])\n",
"import numpy as np\nfrom openrec.tf1.utils.evaluators import Evaluator\n\nclass Precision(Evaluator):\n\n def __init__(self, precision_at, name='Precision'):\n \n self._precision_at = np.array(precision_at)\n\n super(Precision, self).__init__(etype='rank', name=name)\n\n def compute(self, rank_above, negative_num):\n\n del negative_num\n results = np.zeros(len(self._precision_at))\n for rank in rank_above:\n results += (rank <= self._precision_at).astype(np.float32)\n\n return results / self._precision_at\n"
] |
[
[
"numpy.zeros"
],
[
"tensorflow.clip_by_value",
"tensorflow.concat",
"tensorflow.unstack",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.reshape",
"tensorflow.keras.losses.BinaryCrossentropy"
],
[
"numpy.array"
]
] |
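The Precision evaluator above scores every cutoff at once by broadcasting each hit's rank against the cutoff vector; a hedged sketch with made-up ranks:

import numpy as np

precision_at = np.array([1, 5, 10])   # the k in precision@k
rank_above = [0, 3, 7]                # illustrative ranks of the hit items

results = np.zeros(len(precision_at))
for rank in rank_above:
    # a hit at `rank` counts toward every cutoff it falls under
    results += (rank <= precision_at).astype(np.float32)

print(results / precision_at)  # [1.  0.4 0.3]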
PaulEmmanuelSotir/kedro
|
[
"0b0d095bfc11324dc5a0fcf6e8dec891426b3a01"
] |
[
"tests/framework/context/test_context.py"
] |
[
"# Copyright 2020 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport configparser\nimport json\nimport re\nimport sys\nfrom pathlib import Path, PurePath, PurePosixPath, PureWindowsPath\nfrom time import sleep\nfrom typing import Any, Dict\n\nimport pandas as pd\nimport pytest\nimport yaml\nfrom pandas.util.testing import assert_frame_equal\n\nfrom kedro import __version__ as kedro_version\nfrom kedro.config import MissingConfigException\nfrom kedro.extras.datasets.pandas import CSVDataSet\nfrom kedro.framework.context import (\n KedroContext,\n KedroContextError,\n validate_source_path,\n)\nfrom kedro.framework.context.context import (\n _convert_paths_to_absolute_posix,\n _is_relative_path,\n _validate_layers_for_transcoding,\n)\nfrom kedro.io.core import Version, generate_timestamp\nfrom kedro.pipeline import Pipeline, node\nfrom kedro.runner import ParallelRunner, SequentialRunner\n\n\ndef _get_local_logging_config():\n return {\n \"version\": 1,\n \"formatters\": {\n \"simple\": {\"format\": \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"}\n },\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"loggers\": {\n \"kedro\": {\"level\": \"INFO\", \"handlers\": [\"console\"], \"propagate\": False}\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"simple\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"info_file_handler\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"simple\",\n \"filename\": \"logs/info.log\",\n },\n }\n\n\ndef _write_yaml(filepath: Path, config: Dict):\n filepath.parent.mkdir(parents=True, exist_ok=True)\n yaml_str = yaml.dump(config)\n filepath.write_text(yaml_str)\n\n\ndef _write_json(filepath: Path, config: Dict):\n filepath.parent.mkdir(parents=True, exist_ok=True)\n json_str = json.dumps(config)\n filepath.write_text(json_str)\n\n\ndef _write_dummy_ini(filepath: Path):\n filepath.parent.mkdir(parents=True, exist_ok=True)\n config = configparser.ConfigParser()\n config[\"prod\"] = {\"url\": \"postgresql://user:pass@url_prod/db\"}\n 
config[\"staging\"] = {\"url\": \"postgresql://user:pass@url_staging/db\"}\n with filepath.open(\"wt\") as configfile: # save\n config.write(configfile)\n\n\[email protected]\ndef base_config(tmp_path):\n cars_filepath = (tmp_path / \"cars.csv\").as_posix()\n trains_filepath = (tmp_path / \"trains.csv\").as_posix()\n\n return {\n \"trains\": {\"type\": \"pandas.CSVDataSet\", \"filepath\": trains_filepath},\n \"cars\": {\n \"type\": \"pandas.CSVDataSet\",\n \"filepath\": cars_filepath,\n \"save_args\": {\"index\": True},\n },\n }\n\n\[email protected]\ndef local_config(tmp_path):\n cars_filepath = (tmp_path / \"cars.csv\").as_posix()\n boats_filepath = (tmp_path / \"boats.csv\").as_posix()\n # use one dataset with a relative filepath\n horses_filepath = \"horses.csv\"\n return {\n \"cars\": {\n \"type\": \"pandas.CSVDataSet\",\n \"filepath\": cars_filepath,\n \"save_args\": {\"index\": False},\n \"versioned\": True,\n },\n \"boats\": {\n \"type\": \"pandas.CSVDataSet\",\n \"filepath\": boats_filepath,\n \"versioned\": True,\n \"layer\": \"raw\",\n },\n \"horses\": {\n \"type\": \"pandas.CSVDataSet\",\n \"filepath\": horses_filepath,\n \"versioned\": True,\n },\n }\n\n\[email protected](params=[None])\ndef env(request):\n return request.param\n\n\[email protected]\ndef config_dir(tmp_path, base_config, local_config, env):\n env = \"local\" if env is None else env\n proj_catalog = tmp_path / \"conf\" / \"base\" / \"catalog.yml\"\n env_catalog = tmp_path / \"conf\" / str(env) / \"catalog.yml\"\n env_credentials = tmp_path / \"conf\" / str(env) / \"credentials.yml\"\n env_logging = tmp_path / \"conf\" / str(env) / \"logging.yml\"\n parameters = tmp_path / \"conf\" / \"base\" / \"parameters.json\"\n db_config_path = tmp_path / \"conf\" / \"base\" / \"db.ini\"\n project_parameters = {\"param1\": 1, \"param2\": 2, \"param3\": {\"param4\": 3}}\n _write_yaml(proj_catalog, base_config)\n _write_yaml(env_catalog, local_config)\n _write_yaml(env_credentials, local_config)\n _write_yaml(env_logging, _get_local_logging_config())\n _write_json(parameters, project_parameters)\n _write_dummy_ini(db_config_path)\n\n\[email protected]\ndef dummy_dataframe():\n return pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5], \"col3\": [5, 6]})\n\n\ndef identity(input1: str):\n return input1 # pragma: no cover\n\n\ndef bad_node(x):\n raise ValueError(\"Oh no!\")\n\n\nbad_pipeline_middle = Pipeline(\n [\n node(identity, \"cars\", \"boats\", name=\"node1\", tags=[\"tag1\"]),\n node(identity, \"boats\", \"trains\", name=\"node2\"),\n node(bad_node, \"trains\", \"ships\", name=\"nodes3\"),\n node(identity, \"ships\", \"planes\", name=\"node4\"),\n ],\n tags=\"bad_pipeline\",\n)\n\nexpected_message_middle = (\n \"There are 2 nodes that have not run.\\n\"\n \"You can resume the pipeline run by adding the following \"\n \"argument to your previous command:\\n\"\n ' --from-nodes \"nodes3\"'\n)\n\n\nbad_pipeline_head = Pipeline(\n [\n node(bad_node, \"cars\", \"boats\", name=\"node1\", tags=[\"tag1\"]),\n node(identity, \"boats\", \"trains\", name=\"node2\"),\n node(identity, \"trains\", \"ships\", name=\"nodes3\"),\n node(identity, \"ships\", \"planes\", name=\"node4\"),\n ],\n tags=\"bad_pipeline\",\n)\n\nexpected_message_head = (\n \"There are 4 nodes that have not run.\\n\"\n \"You can resume the pipeline run by adding the following \"\n \"argument to your previous command:\\n\"\n)\n\n\nclass DummyContext(KedroContext):\n project_name = \"bob\"\n project_version = kedro_version\n package_name = \"bob\"\n\n def 
_get_pipelines(self) -> Dict[str, Pipeline]:\n pipeline = Pipeline(\n [\n node(identity, \"cars\", \"boats\", name=\"node1\", tags=[\"tag1\"]),\n node(identity, \"boats\", \"trains\", name=\"node2\"),\n node(identity, \"trains\", \"ships\", name=\"node3\"),\n node(identity, \"ships\", \"planes\", name=\"node4\"),\n ],\n tags=\"pipeline\",\n )\n return {\"__default__\": pipeline}\n\n\nclass DummyContextWithPipelinePropertyOnly(KedroContext):\n \"\"\"\n We need this for testing the backward compatibility.\n \"\"\"\n\n # pylint: disable=abstract-method\n\n project_name = \"bob_old\"\n project_version = kedro_version\n package_name = \"bob_old\"\n\n @property\n def pipeline(self) -> Pipeline:\n return Pipeline(\n [\n node(identity, \"cars\", \"boats\", name=\"node1\", tags=[\"tag1\"]),\n node(identity, \"boats\", \"trains\", name=\"node2\"),\n node(identity, \"trains\", \"ships\", name=\"node3\"),\n node(identity, \"ships\", \"planes\", name=\"node4\"),\n ],\n tags=\"pipeline\",\n )\n\n\[email protected](params=[None])\ndef extra_params(request):\n return request.param\n\n\[email protected]\ndef dummy_context(tmp_path, mocker, env, extra_params):\n # Disable logging.config.dictConfig in KedroContext._setup_logging as\n # it changes logging.config and affects other unit tests\n mocker.patch(\"logging.config.dictConfig\")\n return DummyContext(str(tmp_path), env=env, extra_params=extra_params)\n\n\[email protected](\"config_dir\")\nclass TestKedroContext:\n def test_attributes(self, tmp_path, dummy_context):\n assert dummy_context.project_name == \"bob\"\n assert dummy_context.project_version == kedro_version\n assert isinstance(dummy_context.project_path, Path)\n assert dummy_context.project_path == tmp_path.resolve()\n\n def test_get_catalog_always_using_absolute_path(self, dummy_context):\n conf_catalog = dummy_context.config_loader.get(\"catalog*\")\n\n # even though the raw configuration uses relative path\n assert conf_catalog[\"horses\"][\"filepath\"] == \"horses.csv\"\n\n # the catalog and its dataset should be loaded using absolute path\n # based on the project path\n catalog = dummy_context._get_catalog()\n ds_path = catalog._data_sets[\"horses\"]._filepath\n assert PurePath(ds_path.as_posix()).is_absolute()\n assert (\n ds_path.as_posix()\n == (dummy_context._project_path / \"horses.csv\").as_posix()\n )\n\n def test_get_catalog_validates_layers(self, dummy_context, mocker):\n mock_validate = mocker.patch(\n \"kedro.framework.context.context._validate_layers_for_transcoding\"\n )\n\n catalog = dummy_context._get_catalog()\n\n mock_validate.assert_called_once_with(catalog)\n\n def test_catalog(self, dummy_context, dummy_dataframe):\n assert dummy_context.catalog.layers == {\"raw\": {\"boats\"}}\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n reloaded_df = dummy_context.catalog.load(\"cars\")\n assert_frame_equal(reloaded_df, dummy_dataframe)\n\n def test_io(self, dummy_context, dummy_dataframe):\n dummy_context.io.save(\"cars\", dummy_dataframe)\n reloaded_df = dummy_context.io.load(\"cars\")\n assert_frame_equal(reloaded_df, dummy_dataframe)\n\n @pytest.mark.parametrize(\n \"extra_params\",\n [None, {}, {\"foo\": \"bar\", \"baz\": [1, 2], \"qux\": None}],\n indirect=True,\n )\n def test_params(self, dummy_context, extra_params):\n extra_params = extra_params or {}\n expected = {\"param1\": 1, \"param2\": 2, \"param3\": {\"param4\": 3}, **extra_params}\n assert dummy_context.params == expected\n\n @pytest.mark.parametrize(\n \"param,expected\",\n [(\"params:param3\", 
{\"param4\": 3}), (\"params:param3.param4\", 3)],\n )\n def test_nested_params(self, param, expected, dummy_context):\n param = dummy_context.catalog.load(param)\n assert param == expected\n\n @pytest.mark.parametrize(\n \"extra_params\",\n [None, {}, {\"foo\": \"bar\", \"baz\": [1, 2], \"qux\": None}],\n indirect=True,\n )\n def test_params_missing(self, dummy_context, mocker, extra_params):\n mock_config_loader = mocker.patch.object(DummyContext, \"config_loader\")\n mock_config_loader.get.side_effect = MissingConfigException(\"nope\")\n extra_params = extra_params or {}\n\n pattern = \"Parameters not found in your Kedro project config\"\n with pytest.warns(UserWarning, match=pattern):\n actual = dummy_context.params\n assert actual == extra_params\n\n def test_config_loader(self, dummy_context):\n params = dummy_context.config_loader.get(\"parameters*\")\n db_conf = dummy_context.config_loader.get(\"db*\")\n catalog = dummy_context.config_loader.get(\"catalog*\")\n\n assert params[\"param1\"] == 1\n assert db_conf[\"prod\"][\"url\"] == \"postgresql://user:pass@url_prod/db\"\n\n assert catalog[\"trains\"][\"type\"] == \"pandas.CSVDataSet\"\n assert catalog[\"cars\"][\"type\"] == \"pandas.CSVDataSet\"\n assert catalog[\"boats\"][\"type\"] == \"pandas.CSVDataSet\"\n assert not catalog[\"cars\"][\"save_args\"][\"index\"]\n\n def test_default_env(self, dummy_context):\n assert dummy_context.env == \"local\"\n\n @pytest.mark.parametrize(\n \"invalid_version\", [\"0.13.0\", \"10.0\", \"101.1\", \"100.0\", \"-0\"]\n )\n def test_invalid_version(self, tmp_path, mocker, invalid_version):\n # Disable logging.config.dictConfig in KedroContext._setup_logging as\n # it changes logging.config and affects other unit tests\n mocker.patch(\"logging.config.dictConfig\")\n\n class _DummyContext(KedroContext):\n project_name = \"bob\"\n package_name = \"bob\"\n project_version = invalid_version\n\n def _get_pipelines(self) -> Dict[str, Pipeline]:\n return {\"__default__\": Pipeline([])} # pragma: no cover\n\n pattern = (\n r\"Your Kedro project version {} does not match \"\n r\"Kedro package version {} you are running. 
\".format(\n invalid_version, kedro_version\n )\n )\n with pytest.raises(KedroContextError, match=pattern):\n _DummyContext(str(tmp_path))\n\n @pytest.mark.parametrize(\"env\", [\"custom_env\"], indirect=True)\n def test_custom_env(self, dummy_context, env):\n assert dummy_context.env == env\n\n def test_missing_parameters(self, tmp_path, mocker):\n parameters = tmp_path / \"conf\" / \"base\" / \"parameters.json\"\n parameters.unlink()\n\n # Disable logging.config.dictConfig in KedroContext._setup_logging as\n # it changes logging.config and affects other unit tests\n mocker.patch(\"logging.config.dictConfig\")\n\n pattern = \"Parameters not found in your Kedro project config.\"\n with pytest.warns(UserWarning, match=re.escape(pattern)):\n DummyContext( # pylint: disable=expression-not-assigned\n str(tmp_path)\n ).catalog\n\n def test_missing_credentials(self, tmp_path, mocker):\n env_credentials = tmp_path / \"conf\" / \"local\" / \"credentials.yml\"\n env_credentials.unlink()\n\n # Disable logging.config.dictConfig in KedroContext._setup_logging as\n # it changes logging.config and affects other unit tests\n mocker.patch(\"logging.config.dictConfig\")\n\n pattern = \"Credentials not found in your Kedro project config.\"\n with pytest.warns(UserWarning, match=re.escape(pattern)):\n DummyContext( # pylint: disable=expression-not-assigned\n str(tmp_path)\n ).catalog\n\n def test_pipeline(self, dummy_context):\n assert dummy_context.pipeline.nodes[0].inputs == [\"cars\"]\n assert dummy_context.pipeline.nodes[0].outputs == [\"boats\"]\n assert dummy_context.pipeline.nodes[1].inputs == [\"boats\"]\n assert dummy_context.pipeline.nodes[1].outputs == [\"trains\"]\n\n def test_pipelines(self, dummy_context):\n assert len(dummy_context.pipelines) == 1\n assert len(dummy_context.pipelines[\"__default__\"].nodes) == 4\n\n def test_setup_logging_using_absolute_path(self, tmp_path, mocker):\n mocked_dict_config = mocker.patch(\"logging.config.dictConfig\")\n dummy_context = DummyContext(str(tmp_path))\n called_args = mocked_dict_config.call_args[0][0]\n assert (\n called_args[\"info_file_handler\"][\"filename\"]\n == (dummy_context._project_path / \"logs\" / \"info.log\").as_posix()\n )\n\n\[email protected](\"config_dir\")\nclass TestKedroContextRun:\n def test_run_output(self, dummy_context, dummy_dataframe):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n outputs = dummy_context.run()\n pd.testing.assert_frame_equal(outputs[\"planes\"], dummy_dataframe)\n\n def test_run_no_output(self, dummy_context, dummy_dataframe):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n outputs = dummy_context.run(node_names=[\"node1\"])\n assert not outputs\n\n def test_default_run(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run()\n\n log_msgs = [record.getMessage() for record in caplog.records]\n log_names = [record.name for record in caplog.records]\n\n assert \"kedro.runner.sequential_runner\" in log_names\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_sequential_run_arg(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(runner=SequentialRunner())\n\n log_msgs = [record.getMessage() for record in caplog.records]\n log_names = [record.name for record in caplog.records]\n assert \"kedro.runner.sequential_runner\" in log_names\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n @pytest.mark.skipif(\n 
sys.platform.startswith(\"win\"), reason=\"Due to bug in parallel runner\"\n )\n def test_parallel_run_arg(self, dummy_context, dummy_dataframe, caplog, mocker):\n mocker.patch(\n \"kedro.framework.context.context.load_context\", return_value=dummy_context\n )\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(runner=ParallelRunner())\n\n log_msgs = [record.getMessage() for record in caplog.records]\n log_names = [record.name for record in caplog.records]\n assert \"kedro.runner.parallel_runner\" in log_names\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_with_node_names(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(node_names=[\"node1\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n assert \"Running node: node2: identity([boats]) -> [trains]\" not in log_msgs\n\n def test_run_with_node_names_and_tags(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(node_names=[\"node1\"], tags=[\"tag1\", \"pipeline\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n assert \"Running node: node2: identity([boats]) -> [trains]\" not in log_msgs\n\n def test_run_with_tags(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(tags=[\"tag1\"])\n log_msgs = [record.getMessage() for record in caplog.records]\n\n assert \"Completed 1 out of 1 tasks\" in log_msgs\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Running node: node2: identity([boats]) -> [trains]\" not in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_with_wrong_tags(self, dummy_context, dummy_dataframe):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n pattern = r\"Pipeline contains no nodes with tags: \\['non\\-existent'\\]\"\n with pytest.raises(KedroContextError, match=pattern):\n dummy_context.run(tags=[\"non-existent\"])\n\n def test_run_from_nodes(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(from_nodes=[\"node1\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Completed 4 out of 4 tasks\" in log_msgs\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_to_nodes(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(to_nodes=[\"node2\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Completed 2 out of 2 tasks\" in log_msgs\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Running node: node2: identity([boats]) -> [trains]\" in log_msgs\n assert \"Running node: node3: identity([trains]) -> [ships]\" not in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_with_node_range(self, dummy_context, dummy_dataframe, caplog):\n dummy_context.catalog.save(\"cars\", 
dummy_dataframe)\n dummy_context.run(from_nodes=[\"node1\"], to_nodes=[\"node3\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Completed 3 out of 3 tasks\" in log_msgs\n assert \"Running node: node1: identity([cars]) -> [boats]\" in log_msgs\n assert \"Running node: node2: identity([boats]) -> [trains]\" in log_msgs\n assert \"Running node: node3: identity([trains]) -> [ships]\" in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_with_invalid_node_range(self, dummy_context, dummy_dataframe):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n pattern = \"Pipeline contains no nodes\"\n\n with pytest.raises(KedroContextError, match=pattern):\n dummy_context.run(from_nodes=[\"node3\"], to_nodes=[\"node1\"])\n\n def test_run_from_inputs(self, dummy_context, dummy_dataframe, caplog):\n for dataset in (\"cars\", \"trains\", \"boats\"):\n dummy_context.catalog.save(dataset, dummy_dataframe)\n dummy_context.run(from_inputs=[\"trains\"])\n\n log_msgs = [record.getMessage() for record in caplog.records]\n assert \"Completed 2 out of 2 tasks\" in log_msgs\n assert \"Running node: node3: identity([trains]) -> [ships]\" in log_msgs\n assert \"Running node: node4: identity([ships]) -> [planes]\" in log_msgs\n assert \"Pipeline execution completed successfully.\" in log_msgs\n\n def test_run_load_versions(self, tmp_path, dummy_context, dummy_dataframe, mocker):\n class DummyContext(KedroContext):\n project_name = \"bob\"\n package_name = \"bob\"\n project_version = kedro_version\n\n def _get_pipelines(self) -> Dict[str, Pipeline]:\n return {\"__default__\": Pipeline([node(identity, \"cars\", \"boats\")])}\n\n mocker.patch(\"logging.config.dictConfig\")\n dummy_context = DummyContext(str(tmp_path))\n filepath = (dummy_context.project_path / \"cars.csv\").as_posix()\n\n old_save_version = generate_timestamp()\n old_df = pd.DataFrame({\"col1\": [0, 0], \"col2\": [0, 0], \"col3\": [0, 0]})\n old_csv_data_set = CSVDataSet(\n filepath=filepath,\n save_args={\"sep\": \",\"},\n version=Version(None, old_save_version),\n )\n old_csv_data_set.save(old_df)\n\n sleep(0.5)\n new_save_version = generate_timestamp()\n new_csv_data_set = CSVDataSet(\n filepath=filepath,\n save_args={\"sep\": \",\"},\n version=Version(None, new_save_version),\n )\n new_csv_data_set.save(dummy_dataframe)\n\n load_versions = {\"cars\": old_save_version}\n dummy_context.run(load_versions=load_versions)\n assert not dummy_context.catalog.load(\"boats\").equals(dummy_dataframe)\n assert dummy_context.catalog.load(\"boats\").equals(old_df)\n\n def test_run_with_empty_pipeline(self, tmp_path, mocker):\n class DummyContext(KedroContext):\n project_name = \"bob\"\n package_name = \"bob\"\n project_version = kedro_version\n\n def _get_pipelines(self) -> Dict[str, Pipeline]:\n return {\"__default__\": Pipeline([])}\n\n mocker.patch(\"logging.config.dictConfig\")\n dummy_context = DummyContext(str(tmp_path))\n assert dummy_context.project_name == \"bob\"\n assert dummy_context.project_version == kedro_version\n pattern = \"Pipeline contains no nodes\"\n with pytest.raises(KedroContextError, match=pattern):\n dummy_context.run()\n\n @pytest.mark.parametrize(\n \"context_pipeline,expected_message\",\n [\n (bad_pipeline_middle, expected_message_middle),\n (bad_pipeline_head, expected_message_head),\n ], # pylint: disable=too-many-arguments\n )\n def test_run_failure_prompts_resume_command(\n self,\n mocker,\n tmp_path,\n dummy_dataframe,\n caplog,\n context_pipeline,\n 
expected_message,\n ):\n class BadContext(KedroContext):\n project_name = \"fred\"\n package_name = \"fred\"\n project_version = kedro_version\n\n def _get_pipelines(self) -> Dict[str, Pipeline]:\n return {\"__default__\": context_pipeline}\n\n mocker.patch(\"logging.config.dictConfig\")\n\n bad_context = BadContext(str(tmp_path))\n bad_context.catalog.save(\"cars\", dummy_dataframe)\n with pytest.raises(ValueError, match=\"Oh no\"):\n bad_context.run()\n\n actual_messages = [\n record.getMessage()\n for record in caplog.records\n if record.levelname == \"WARNING\"\n ]\n\n assert expected_message in actual_messages\n\n def test_missing_pipeline_name(self, dummy_context, dummy_dataframe):\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n\n with pytest.raises(KedroContextError, match=\"Failed to find the pipeline\"):\n dummy_context.run(pipeline_name=\"invalid-name\")\n\n def test_without_get_pipeline_deprecated(\n self, dummy_dataframe, mocker, tmp_path, env\n ):\n \"\"\"\n The old way of providing a `pipeline` context property is deprecated,\n but still works, yielding a warning message.\n \"\"\"\n mocker.patch(\"logging.config.dictConfig\")\n dummy_context = DummyContextWithPipelinePropertyOnly(str(tmp_path), env=env)\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n\n msg = \"You are using the deprecated pipeline construction mechanism\"\n with pytest.warns(DeprecationWarning, match=msg):\n outputs = dummy_context.run()\n\n pd.testing.assert_frame_equal(outputs[\"planes\"], dummy_dataframe)\n\n def test_without_get_pipeline_error(self, dummy_dataframe, mocker, tmp_path, env):\n \"\"\"\n The old way of providing a `pipeline` context property is deprecated,\n but still works, yielding a warning message.\n If you try to run a sub-pipeline by name - it's an error.\n \"\"\"\n\n mocker.patch(\"logging.config.dictConfig\")\n dummy_context = DummyContextWithPipelinePropertyOnly(str(tmp_path), env=env)\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n\n error_msg = \"The project is not fully migrated to use multiple pipelines.\"\n\n with pytest.raises(KedroContextError, match=error_msg):\n dummy_context.run(pipeline_name=\"missing-pipeline\")\n\n @pytest.mark.parametrize(\n \"extra_params\",\n [None, {}, {\"foo\": \"bar\", \"baz\": [1, 2], \"qux\": None}],\n indirect=True,\n )\n def test_run_with_extra_params(\n self, mocker, dummy_context, dummy_dataframe, extra_params\n ):\n mocker.patch(\"logging.config.dictConfig\")\n mock_journal = mocker.patch(\"kedro.framework.context.context.Journal\")\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run()\n\n assert mock_journal.call_args[0][0][\"extra_params\"] == extra_params\n\n def test_run_with_save_version_as_run_id(\n self, mocker, tmp_path, dummy_dataframe, caplog\n ):\n \"\"\"Test that the default behaviour, with run_id set to None,\n creates a journal record with the run_id the same as save_version.\n \"\"\"\n mocker.patch(\"logging.config.dictConfig\")\n save_version = \"2020-01-01T00.00.00.000Z\"\n mocked_get_save_version = mocker.patch.object(\n DummyContext, \"_get_save_version\", return_value=save_version\n )\n\n dummy_context = DummyContext(str(tmp_path))\n dummy_context.catalog.save(\"cars\", dummy_dataframe)\n dummy_context.run(load_versions={\"boats\": save_version})\n\n mocked_get_save_version.assert_called_once_with()\n log_msg = next(\n record.getMessage()\n for record in caplog.records\n if record.name == \"kedro.journal\"\n )\n assert json.loads(log_msg)[\"run_id\"] == 
save_version\n\n    def test_run_with_custom_run_id(self, mocker, tmp_path, dummy_dataframe, caplog):\n        mocker.patch(\"logging.config.dictConfig\")\n        run_id = \"001\"\n        mocked_get_run_id = mocker.patch.object(\n            DummyContext, \"_get_run_id\", return_value=run_id\n        )\n\n        dummy_context = DummyContext(str(tmp_path))\n        dummy_context.catalog.save(\"cars\", dummy_dataframe)\n        dummy_context.run()\n\n        assert (\n            mocked_get_run_id.call_count == 3\n        )  # once during run, and twice for each `.catalog`\n        log_msg = next(\n            record.getMessage()\n            for record in caplog.records\n            if record.name == \"kedro.journal\"\n        )\n        assert json.loads(log_msg)[\"run_id\"] == run_id\n\n    @pytest.mark.parametrize(\n        \"ctx_project_name\",\n        [\"project_name\", \"Project name \", \"_Project--name-\", \"--Project-_\\n ~-namE__\"],\n    )\n    def test_default_package_name(self, tmp_path, mocker, ctx_project_name):\n        \"\"\"Test default package name derived by ProjectContext\"\"\"\n        mocker.patch(\"logging.config.dictConfig\")\n\n        expected_package_name = \"project_name\"\n\n        class DummyContextNoPkgName(KedroContext):\n            project_name = ctx_project_name\n            project_version = kedro_version\n\n            def _get_pipelines(self):  # pragma: no cover\n                return {\"__default__\": Pipeline([])}\n\n        dummy_context = DummyContextNoPkgName(tmp_path)\n        assert dummy_context.package_name == expected_package_name\n\n\[email protected](\n    \"path_string,expected\",\n    [\n        # remote paths shouldn't be relative paths\n        (\"s3://\", False),\n        (\"gcp://path/to/file.json\", False),\n        # windows absolute paths shouldn't be relative paths\n        (\"C:\\\\path\\\\to\\\\file.json\", False),\n        (\"C:\", False),\n        (\"C:/Windows/\", False),\n        # posix absolute paths shouldn't be relative paths\n        (\"/tmp/logs/info.log\", False),\n        (\"/usr/share\", False),\n        # test relative paths\n        (\"data/01_raw/data.json\", True),\n        (\"logs/info.log\", True),\n        (\"logs\\\\error.txt\", True),\n        (\"data\", True),\n    ],\n)\ndef test_is_relative_path(path_string: str, expected: bool):\n    assert _is_relative_path(path_string) == expected\n\n\ndef test_convert_paths_raises_error_on_relative_project_path():\n    path = Path(\"relative/path\")\n    with pytest.raises(ValueError) as excinfo:\n        _convert_paths_to_absolute_posix(project_path=path, conf_dictionary={})\n\n    assert (\n        str(excinfo.value) == f\"project_path must be an absolute path. 
Received: {path}\"\n )\n\n\[email protected](\n \"project_path,input_conf,expected\",\n [\n (\n PurePosixPath(\"/tmp\"),\n {\"handler\": {\"filename\": \"logs/info.log\"}},\n {\"handler\": {\"filename\": \"/tmp/logs/info.log\"}},\n ),\n (\n PurePosixPath(\"/User/kedro\"),\n {\"my_dataset\": {\"filepath\": \"data/01_raw/dataset.json\"}},\n {\"my_dataset\": {\"filepath\": \"/User/kedro/data/01_raw/dataset.json\"}},\n ),\n (\n PureWindowsPath(\"C:\\\\kedro\"),\n {\"my_dataset\": {\"path\": \"data/01_raw/dataset.json\"}},\n {\"my_dataset\": {\"path\": \"C:/kedro/data/01_raw/dataset.json\"}},\n ),\n # test: the function shouldn't modify paths for key not associated with filepath\n (\n PurePosixPath(\"/User/kedro\"),\n {\"my_dataset\": {\"fileurl\": \"relative/url\"}},\n {\"my_dataset\": {\"fileurl\": \"relative/url\"}},\n ),\n ],\n)\ndef test_convert_paths_to_absolute_posix_for_all_known_filepath_keys(\n project_path: Path, input_conf: Dict[str, Any], expected: Dict[str, Any]\n):\n assert _convert_paths_to_absolute_posix(project_path, input_conf) == expected\n\n\[email protected](\n \"project_path,input_conf,expected\",\n [\n (\n PurePosixPath(\"/tmp\"),\n {\"handler\": {\"filename\": \"/usr/local/logs/info.log\"}},\n {\"handler\": {\"filename\": \"/usr/local/logs/info.log\"}},\n ),\n (\n PurePosixPath(\"/User/kedro\"),\n {\"my_dataset\": {\"filepath\": \"s3://data/01_raw/dataset.json\"}},\n {\"my_dataset\": {\"filepath\": \"s3://data/01_raw/dataset.json\"}},\n ),\n ],\n)\ndef test_convert_paths_to_absolute_posix_not_changing_non_relative_path(\n project_path: Path, input_conf: Dict[str, Any], expected: Dict[str, Any]\n):\n assert _convert_paths_to_absolute_posix(project_path, input_conf) == expected\n\n\[email protected](\n \"project_path,input_conf,expected\",\n [\n (\n PureWindowsPath(\"D:\\\\kedro\"),\n {\"my_dataset\": {\"path\": r\"C:\\data\\01_raw\\dataset.json\"}},\n {\"my_dataset\": {\"path\": \"C:/data/01_raw/dataset.json\"}},\n )\n ],\n)\ndef test_convert_paths_to_absolute_posix_converts_full_windows_path_to_posix(\n project_path: Path, input_conf: Dict[str, Any], expected: Dict[str, Any]\n):\n assert _convert_paths_to_absolute_posix(project_path, input_conf) == expected\n\n\[email protected](\n \"layers\",\n [\n {\"raw\": {\"A\"}, \"interm\": {\"B\", \"C\"}},\n {\"raw\": {\"A\"}, \"interm\": {\"B@2\", \"B@1\"}},\n {\"raw\": {\"C@1\"}, \"interm\": {\"A\", \"B@1\", \"B@2\", \"B@3\"}},\n ],\n)\ndef test_validate_layers(layers, mocker):\n mock_catalog = mocker.MagicMock()\n mock_catalog.layers = layers\n\n _validate_layers_for_transcoding(mock_catalog) # it shouldn't raise any error\n\n\[email protected](\n \"layers,conflicting_datasets\",\n [\n ({\"raw\": {\"A\", \"B@1\"}, \"interm\": {\"B@2\"}}, [\"B@2\"]),\n ({\"raw\": {\"A\"}, \"interm\": {\"B@1\", \"B@2\"}, \"prm\": {\"B@3\"}}, [\"B@3\"]),\n (\n {\n \"raw\": {\"A@1\"},\n \"interm\": {\"B@1\", \"B@2\"},\n \"prm\": {\"B@3\", \"B@4\"},\n \"other\": {\"A@2\"},\n },\n [\"A@2\", \"B@3\", \"B@4\"],\n ),\n ],\n)\ndef test_validate_layers_error(layers, conflicting_datasets, mocker):\n mock_catalog = mocker.MagicMock()\n mock_catalog.layers = layers\n error_str = \", \".join(conflicting_datasets)\n\n pattern = f\"Transcoded datasets should have the same layer. 
Mismatch found for: {error_str}\"\n with pytest.raises(ValueError, match=re.escape(pattern)):\n _validate_layers_for_transcoding(mock_catalog)\n\n\nclass TestValidateSourcePath:\n @pytest.mark.parametrize(\n \"source_dir\", [\".\", \"src\", \"./src\", \"src/nested\", \"src/nested/nested\"]\n )\n def test_valid_source_path(self, tmp_path, source_dir):\n source_path = (tmp_path / source_dir).resolve()\n source_path.mkdir(parents=True, exist_ok=True)\n validate_source_path(source_path, tmp_path.resolve())\n\n @pytest.mark.parametrize(\"source_dir\", [\"..\", \"src/../..\", \"~\"])\n def test_invalid_source_path(self, tmp_path, source_dir):\n source_dir = Path(source_dir).expanduser()\n source_path = (tmp_path / source_dir).resolve()\n source_path.mkdir(parents=True, exist_ok=True)\n\n pattern = re.escape(\n f\"Source path '{source_path}' has to be relative to your project root \"\n f\"'{tmp_path.resolve()}'\"\n )\n with pytest.raises(KedroContextError, match=pattern):\n validate_source_path(source_path, tmp_path.resolve())\n\n def test_non_existent_source_path(self, tmp_path):\n source_path = (tmp_path / \"non_existent\").resolve()\n\n pattern = re.escape(f\"Source path '{source_path}' cannot be found.\")\n with pytest.raises(KedroContextError, match=pattern):\n validate_source_path(source_path, tmp_path.resolve())\n"
] |
[
[
"pandas.util.testing.assert_frame_equal",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
mahkons/RL-algorithms
|
[
"bc5da6734263184e6229d34cd68f092feb94e9a3"
] |
[
"SAC/agent.py"
] |
[
"import random\nimport numpy as np\nimport os\nimport torch\n\n\nclass Agent:\n def __init__(self):\n self.model = torch.load(__file__[:-8] + \"/agent.pkl\", map_location=\"cpu\")\n \n def act(self, state):\n with torch.no_grad():\n state = torch.tensor(np.array([state]), dtype=torch.float)\n _, mean, _, _ = self.model.sample(state)\n return torch.tanh(mean[0]).numpy()\n\n def reset(self):\n pass\n"
] |
[
[
"torch.tanh",
"numpy.array",
"torch.no_grad",
"torch.load"
]
] |
lizhaoliu-Lec/fcos
|
[
"6a5e25831c943f46286c3cff65500139886c921e"
] |
[
"libs/iou_loss.py"
] |
[
"import torch\n\n\ndef bbox_overlaps_aligned(bboxes1, bboxes2, is_aligned=False):\n '''\n Param:\n bboxes1: FloatTensor(n, 4) # 4: ymin, xmin, ymax, xmax\n bboxes2: FloatTensor(n, 4)\n\n Return: \n FloatTensor(n)\n '''\n tl = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]\n br = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]\n hw = (br - tl + 1).clamp(min=0) # [rows, 2]\n overlap = hw[:, 0] * hw[:, 1]\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)\n ious = overlap / (area1 + area2 - overlap)\n return ious\n\n\ndef iou_loss(pred, target, eps=1e-6):\n '''\n Param:\n pred: FloatTensor(n, 4) # 4: ymin, xmin, ymax, xmax\n target: FloatTensor(n, 4)\n\n Return: \n FloatTensor(n)\n '''\n ious = bbox_overlaps_aligned(pred, target).clamp(min=eps)\n loss = -ious.log()\n return loss\n"
] |
[
[
"torch.min",
"torch.max"
]
] |
forrestjgq/Open3D
|
[
"dcf94bd21943d943b54b402aed4e868da2687180",
"dcf94bd21943d943b54b402aed4e868da2687180"
] |
[
"examples/python/geometry/point_cloud_with_numpy.py",
"examples/python/geometry/ray_casting_to_image.py"
] |
[
"# ----------------------------------------------------------------------------\n# - Open3D: www.open3d.org -\n# ----------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2021 www.open3d.org\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# ----------------------------------------------------------------------------\n\nimport open3d as o3d\nimport numpy as np\n\nif __name__ == \"__main__\":\n # Generate some n x 3 matrix using a variant of sync function.\n x = np.linspace(-3, 3, 201)\n mesh_x, mesh_y = np.meshgrid(x, x)\n z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2)))\n z_norm = (z - z.min()) / (z.max() - z.min())\n xyz = np.zeros((np.size(mesh_x), 3))\n xyz[:, 0] = np.reshape(mesh_x, -1)\n xyz[:, 1] = np.reshape(mesh_y, -1)\n xyz[:, 2] = np.reshape(z_norm, -1)\n print(\"Printing numpy array used to make Open3D pointcloud ...\")\n print(xyz)\n\n # Pass xyz to Open3D.o3d.geometry.PointCloud and visualize.\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(xyz)\n # Add color and estimate normals for better visualization.\n pcd.paint_uniform_color([0.5, 0.5, 0.5])\n pcd.estimate_normals()\n pcd.orient_normals_consistent_tangent_plane(1)\n print(\"Displaying Open3D pointcloud made using numpy array ...\")\n o3d.visualization.draw([pcd])\n\n # Convert Open3D.o3d.geometry.PointCloud to numpy array.\n xyz_converted = np.asarray(pcd.points)\n print(\"Printing numpy array made using Open3D pointcloud ...\")\n print(xyz_converted)\n",
"# ----------------------------------------------------------------------------\n# - Open3D: www.open3d.org -\n# ----------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2021 www.open3d.org\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# ----------------------------------------------------------------------------\n\nimport open3d as o3d\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n # Create meshes and convert to open3d.t.geometry.TriangleMesh .\n cube = o3d.geometry.TriangleMesh.create_box().translate([0, 0, 0])\n cube = o3d.t.geometry.TriangleMesh.from_legacy(cube)\n torus = o3d.geometry.TriangleMesh.create_torus().translate([0, 0, 2])\n torus = o3d.t.geometry.TriangleMesh.from_legacy(torus)\n sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.5).translate(\n [1, 2, 3])\n sphere = o3d.t.geometry.TriangleMesh.from_legacy(sphere)\n\n scene = o3d.t.geometry.RaycastingScene()\n scene.add_triangles(cube)\n scene.add_triangles(torus)\n _ = scene.add_triangles(sphere)\n\n rays = o3d.t.geometry.RaycastingScene.create_rays_pinhole(\n fov_deg=90,\n center=[0, 0, 2],\n eye=[2, 3, 0],\n up=[0, 1, 0],\n width_px=640,\n height_px=480,\n )\n # We can directly pass the rays tensor to the cast_rays function.\n ans = scene.cast_rays(rays)\n plt.imshow(ans['t_hit'].numpy())\n plt.show()\n plt.imshow(np.abs(ans['primitive_normals'].numpy()))\n plt.show()\n plt.imshow(np.abs(ans['geometry_ids'].numpy()), vmax=3)\n plt.show()\n"
] |
[
[
"numpy.linspace",
"numpy.power",
"numpy.reshape",
"numpy.asarray",
"numpy.size",
"numpy.meshgrid"
],
[
"matplotlib.pyplot.show"
]
] |
mayankgolhar/self-supervised-da
|
[
"8402349f495887c97a5f91499d7c474830545530"
] |
[
"data/data_manager.py"
] |
[
"from torch.utils.data import Dataset\nfrom torchvision import transforms\nimport os\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport random\nimport re\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'\n]\n\nclass Data_Manager(Dataset):\n def __init__(self, data_path, num_views = 7, random_seed=1000, test_split = 0, is_train = True):\n super(Data_Manager, self).__init__()\n self.data_path = data_path\n self.num_views = num_views\n self.is_train = is_train\n # self.num_folds = num_folds # Number of folds in cross validation\n\n if is_train:\n # TO-DO : Add more transformations\n self.transform = transforms.Compose([transforms.ColorJitter(brightness=0.2*np.random.rand(1)[0],\n contrast=0.2*np.random.rand(1)[0]), \n transforms.RandomAffine(degrees=(0,180), \n translate=(0.3,0.3),\n scale=(0.5,2), \n fillcolor=0),\n transforms.RandomHorizontalFlip(p=0.2),\n transforms.RandomVerticalFlip(p=0.2),\n transforms.RandomCrop(224, 224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], # Using Pre trained model normalisation\n std=[0.229, 0.224, 0.225])])\n disp_msg = 'Train Loader :'\n else:\n self.transform = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.2),\n transforms.RandomVerticalFlip(p=0.2),\n transforms.RandomCrop(224, 224),\n transforms.ToTensor(), \n transforms.Normalize(mean=[0.485, 0.456, 0.406], # Using pre-trained model normalisation\n std=[0.229, 0.224, 0.225])\n ])\n disp_msg = 'Test Loader :'\n\n # Create a dictionary of classes, videos and respective frames\n self.videos = []\n video_idx = 0\n # Assumed folder structure\n # UAH\n # |-SplitID (eg. Split0)\n # |- ClassName (eg. adenoma, hyper, serr)\n # |- ClassVideoIDMode (eg. adenoma1nbi)\n # |- FrameNum (eg. 
0.png)\n \n # Iterate through all split folders\n for split_folder in os.listdir(data_path): \n # Split string into text & numbers \n # https://www.geeksforgeeks.org/python-splitting-text-and-number-in-string/\n temp = re.compile(\"([a-zA-Z]+)([0-9]+)\") \n split_id = temp.match(split_folder).groups() \n split_id = int(split_id[-1])\n \n # Skip split folder if test split during training or train split during testing\n if (is_train and (split_id == test_split)) or (not is_train and (split_id != test_split)):\n continue\n \n print(disp_msg + split_folder + ' used')\n\n split_path = os.path.join(data_path, split_folder)\n\n # Iterate through all class folders\n for class_name in os.listdir(split_path): \n # Iterate through all videos \n class_path = os.path.join(split_path, class_name)\n\n for video_name in os.listdir(class_path):\n video_path = os.path.join(class_path,video_name)\n\n if not os.path.isdir(video_path):\n continue\n frame_list = []\n label, lesion_id, mode = self.get_video_label_uah(video_name)\n\n if mode == 'nbi':\n continue\n\n # Create list of all frames in the video folder\n for frame_name in os.listdir(video_path):\n if self.is_image_file(frame_name):\n frame_path = os.path.join(video_path, frame_name)\n frame_list.append(frame_path) \n\n self.videos.append(dict(lesion_id = lesion_id,\n video_path = video_path,\n frame_list = frame_list,\n label = label,\n mode = mode,\n )) \t\t\n\n #Do random shuffling of video indices\n self.vid_idxes = np.arange(0,len(self.videos))\n np.random.seed(random_seed)\n np.random.shuffle(self.vid_idxes)\n\n if len(self.vid_idxes):\n frames = 0\n for video_idx in self.vid_idxes:\n video = self.videos[video_idx]\n frames += len(video['frame_list'])\n print(\"{} total frames collected\".format(frames))\n else:\n print(\"No images were loaded from {}\".format(self.data_path))\n\n def __len__(self):\n return len(self.vid_idxes)\n\n def __getitem__(self, index):\n # Select video from the index list \n vid_index = self.vid_idxes[index]\n vid = self.videos[vid_index]\n tot_frames = len(vid['frame_list'])\n \n # # Select equi-spaced frames from the video\n # frames = np.linspace(0, tot_frames-1, self.num_views)\n\n if self.is_train:\n # Select frames randomly from the video\n frames = np.random.randint(0, tot_frames, self.num_views)\n \n # Order them in temporally increasing order\n frames.sort()\n else:\n # Select equi-spaced frames from the video\n frames = np.linspace(0, tot_frames-1, self.num_views)\n frames = np.around(frames)\n\n n = 0\n for frame in frames:\n im = Image.open(vid['frame_list'][int(frame)])\n im = self.transform(im)\n im = im.unsqueeze(0)\n if n is 0:\n imgs = im\n else:\n imgs = torch.cat((imgs, im), 0)\n n += 1\n\n #get labels\n cls_label = vid['label'] \n return dict(inp_data = imgs, cls_label = cls_label)\n\n def is_image_file(self, filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n def get_video_label_uah(self, filename):\n #Get class label from video's filename \n #hyperplastic = 0, adenoma = 1, serrated = 2 \n #filename format : classFileNumberMode \n \n #Split filename \n #REF : https://stackoverflow.com/questions/430079/how-to-split-strings-into-text-and-number\n match = re.match(r\"([a-z]+)([0-9]+)([a-z]+)\", filename, re.I)\n if match:\n className, fileId, mode = match.groups()\n else:\n print(\"Issue spilting file {}\".format(filename))\t\t\t\n\n if className == 'hyper':\n class_label = 0\n elif className == 'adenoma':\n class_label = 1\n elif className == 'serr':\n class_label = 
2\n\n return (class_label, fileId, mode)\n\n\nif __name__ == '__main__':\n from torchvision import transforms\n from torch.utils.data import DataLoader\n im_transforms = transforms.Compose([transforms.RandomHorizontalFlip(p=0.3),\n transforms.RandomVerticalFlip(p=0.3), \n transforms.RandomCrop(size=(224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5])])\n dataset_train = Data_Manager(data_path = \"D:/Research/EndoDS/UAH_CrossValidation\", num_views = 7,\n random_seed=1000, test_split = 0, is_train = True)\n data_loader_train = DataLoader(dataset_train, batch_size = 6, shuffle = True)\n\n n = 0\n for batch in data_loader_train:\n\n if n == 1:\n exit() "
] |
[
[
"numpy.linspace",
"numpy.random.seed",
"torch.cat",
"numpy.around",
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.random.randint"
]
] |
peterprescott/statsmodels
|
[
"7c4a71e8d16618315b7370c88212aa2a4f358270"
] |
[
"statsmodels/tools/eval_measures.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"some measures for evaluation of prediction, tests and model selection\n\nCreated on Tue Nov 08 15:23:20 2011\nUpdated on Wed Jun 03 10:42:20 2020\n\nAuthors: Josef Perktold & Peter Prescott\nLicense: BSD-3\n\n\"\"\"\nimport numpy as np\n\nfrom statsmodels.tools.validation import array_like\n\n\ndef mse(x1, x2, axis=0):\n \"\"\"mean squared error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n mse : ndarray or float\n mean squared error along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass, for example\n numpy matrices will silently produce an incorrect result.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((x1-x2)**2, axis=axis)\n\n\ndef rmse(x1, x2, axis=0):\n \"\"\"root mean squared error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n rmse : ndarray or float\n root mean squared error along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass, for example\n numpy matrices will silently produce an incorrect result.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(mse(x1, x2, axis=axis))\n\ndef rmspe(y, y_hat, axis=0, zeros=np.nan):\n \"\"\"\n Root Mean Squared Percentage Error\n\n Parameters\n ----------\n y : array_like\n The actual value.\n y_hat : array_like\n The predicted value.\n axis : int\n Axis along which the summary statistic is calculated\n zeros : float\n Value to assign to error where actual value is zero\n\n Returns\n -------\n rmspe : ndarray or float\n Root Mean Squared Percentage Error along given axis.\n \"\"\"\n y_hat = np.asarray(y_hat)\n y = np.asarray(y)\n error = y - y_hat\n percentage_error = np.divide(error, y, out=np.full_like(error, zeros), where=y!=0)\n return np.nanmean(percentage_error**2, axis=axis) * 100\n\ndef maxabs(x1, x2, axis=0):\n \"\"\"maximum absolute error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n maxabs : ndarray or float\n maximum absolute difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. 
Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.max(np.abs(x1-x2), axis=axis)\n\n\ndef meanabs(x1, x2, axis=0):\n \"\"\"mean absolute error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n meanabs : ndarray or float\n mean absolute difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean(np.abs(x1-x2), axis=axis)\n\n\ndef medianabs(x1, x2, axis=0):\n \"\"\"median absolute error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n medianabs : ndarray or float\n median absolute difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.median(np.abs(x1-x2), axis=axis)\n\n\ndef bias(x1, x2, axis=0):\n \"\"\"bias, mean error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n bias : ndarray or float\n bias, or mean difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean(x1-x2, axis=axis)\n\n\ndef medianbias(x1, x2, axis=0):\n \"\"\"median bias, median error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n medianbias : ndarray or float\n median bias, or median difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.median(x1-x2, axis=axis)\n\n\ndef vare(x1, x2, ddof=0, axis=0):\n \"\"\"variance of error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n vare : ndarray or float\n variance of difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. 
Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.var(x1-x2, ddof=ddof, axis=axis)\n\n\ndef stde(x1, x2, ddof=0, axis=0):\n \"\"\"standard deviation of error\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two\n arrays.\n axis : int\n axis along which the summary statistic is calculated\n\n Returns\n -------\n stde : ndarray or float\n standard deviation of difference along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast.\n This uses ``numpy.asanyarray`` to convert the input. Whether this is the\n desired result or not depends on the array subclass.\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.std(x1-x2, ddof=ddof, axis=axis)\n\n\ndef iqr(x1, x2, axis=0):\n \"\"\"\n Interquartile range of error\n\n Parameters\n ----------\n x1 : array_like\n One of the inputs into the IQR calculation.\n x2 : array_like\n The other input into the IQR calculation.\n axis : {None, int}\n axis along which the summary statistic is calculated\n\n Returns\n -------\n irq : {float, ndarray}\n Interquartile range along given axis.\n\n Notes\n -----\n If ``x1`` and ``x2`` have different shapes, then they must broadcast.\n \"\"\"\n x1 = array_like(x1, 'x1', dtype=None, ndim=None)\n x2 = array_like(x2, 'x1', dtype=None, ndim=None)\n if axis is None:\n x1 = x1.ravel()\n x2 = x2.ravel()\n axis = 0\n xdiff = np.sort(x1 - x2, axis=axis)\n nobs = x1.shape[axis]\n idx = np.round((nobs-1) * np.array([0.25, 0.75])).astype(int)\n sl = [slice(None)] * xdiff.ndim\n sl[axis] = idx\n iqr = np.diff(xdiff[tuple(sl)], axis=axis)\n iqr = np.squeeze(iqr) # drop reduced dimension\n return iqr\n\n\n# Information Criteria\n# ---------------------\n\ndef aic(llf, nobs, df_modelwc):\n \"\"\"Akaike information criterion\n\n Parameters\n ----------\n llf : float\n value of the loglikelihood\n nobs : int\n number of observations\n df_modelwc : int\n number of parameters including constant\n\n Returns\n -------\n aic : float\n information criterion\n\n References\n ----------\n https://en.wikipedia.org/wiki/Akaike_information_criterion\n \"\"\"\n return -2. * llf + 2. * df_modelwc\n\n\ndef aicc(llf, nobs, df_modelwc):\n \"\"\"Akaike information criterion (AIC) with small sample correction\n\n Parameters\n ----------\n llf : float\n value of the loglikelihood\n nobs : int\n number of observations\n df_modelwc : int\n number of parameters including constant\n\n Returns\n -------\n aicc : float\n information criterion\n\n References\n ----------\n https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc\n \"\"\"\n return -2. * llf + 2. * df_modelwc * nobs / (nobs - df_modelwc - 1.)\n\n\ndef bic(llf, nobs, df_modelwc):\n \"\"\"Bayesian information criterion (BIC) or Schwarz criterion\n\n Parameters\n ----------\n llf : float\n value of the loglikelihood\n nobs : int\n number of observations\n df_modelwc : int\n number of parameters including constant\n\n Returns\n -------\n bic : float\n information criterion\n\n References\n ----------\n https://en.wikipedia.org/wiki/Bayesian_information_criterion\n \"\"\"\n return -2. 
* llf + np.log(nobs) * df_modelwc\n\n\ndef hqic(llf, nobs, df_modelwc):\n    \"\"\"Hannan-Quinn information criterion (HQC)\n\n    Parameters\n    ----------\n    llf : float\n        value of the loglikelihood\n    nobs : int\n        number of observations\n    df_modelwc : int\n        number of parameters including constant\n\n    Returns\n    -------\n    hqic : float\n        information criterion\n\n    References\n    ----------\n    https://en.wikipedia.org/wiki/Hannan%E2%80%93Quinn_information_criterion\n    \"\"\"\n    return -2. * llf + 2 * np.log(np.log(nobs)) * df_modelwc\n\n\n# IC based on residual sigma\n\ndef aic_sigma(sigma2, nobs, df_modelwc, islog=False):\n    r\"\"\"Akaike information criterion\n\n    Parameters\n    ----------\n    sigma2 : float\n        estimate of the residual variance or determinant of Sigma_hat in the\n        multivariate case. If islog is true, then it is assumed that sigma\n        is already log-ed, for example logdetSigma.\n    nobs : int\n        number of observations\n    df_modelwc : int\n        number of parameters including constant\n\n    Returns\n    -------\n    aic : float\n        information criterion\n\n    Notes\n    -----\n    A constant has been dropped in comparison to the loglikelihood base\n    information criteria. The information criteria should be used to compare\n    only comparable models.\n\n    For example, AIC is defined in terms of the loglikelihood as\n\n    :math:`-2 llf + 2 k`\n\n    in terms of :math:`\\hat{\\sigma}^2`\n\n    :math:`log(\\hat{\\sigma}^2) + 2 k / n`\n\n    in terms of the determinant of :math:`\\hat{\\Sigma}`\n\n    :math:`log(\\|\\hat{\\Sigma}\\|) + 2 k / n`\n\n    Note: In our definition we do not divide by n in the log-likelihood\n    version.\n\n    TODO: Latex math\n\n    Reference: e.g. lecture notes by Herman Bierens\n\n    References\n    ----------\n    https://en.wikipedia.org/wiki/Akaike_information_criterion\n    \"\"\"\n    if not islog:\n        sigma2 = np.log(sigma2)\n    return sigma2 + aic(0, nobs, df_modelwc) / nobs\n\n\ndef aicc_sigma(sigma2, nobs, df_modelwc, islog=False):\n    \"\"\"Akaike information criterion (AIC) with small sample correction\n\n    Parameters\n    ----------\n    sigma2 : float\n        estimate of the residual variance or determinant of Sigma_hat in the\n        multivariate case. If islog is true, then it is assumed that sigma\n        is already log-ed, for example logdetSigma.\n    nobs : int\n        number of observations\n    df_modelwc : int\n        number of parameters including constant\n\n    Returns\n    -------\n    aicc : float\n        information criterion\n\n    Notes\n    -----\n    A constant has been dropped in comparison to the loglikelihood base\n    information criteria. These should only be used to compare comparable\n    models.\n\n    References\n    ----------\n    https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc\n    \"\"\"\n    if not islog:\n        sigma2 = np.log(sigma2)\n    return sigma2 + aicc(0, nobs, df_modelwc) / nobs\n\n\ndef bic_sigma(sigma2, nobs, df_modelwc, islog=False):\n    \"\"\"Bayesian information criterion (BIC) or Schwarz criterion\n\n    Parameters\n    ----------\n    sigma2 : float\n        estimate of the residual variance or determinant of Sigma_hat in the\n        multivariate case. If islog is true, then it is assumed that sigma\n        is already log-ed, for example logdetSigma.\n    nobs : int\n        number of observations\n    df_modelwc : int\n        number of parameters including constant\n\n    Returns\n    -------\n    bic : float\n        information criterion\n\n    Notes\n    -----\n    A constant has been dropped in comparison to the loglikelihood base\n    information criteria. These should only be used to compare comparable\n    models.\n\n    References\n    ----------\n    https://en.wikipedia.org/wiki/Bayesian_information_criterion\n    \"\"\"\n    if not islog:\n        sigma2 = np.log(sigma2)\n    return sigma2 + bic(0, nobs, df_modelwc) / nobs\n\n\ndef hqic_sigma(sigma2, nobs, df_modelwc, islog=False):\n    \"\"\"Hannan-Quinn information criterion (HQC)\n\n    Parameters\n    ----------\n    sigma2 : float\n        estimate of the residual variance or determinant of Sigma_hat in the\n        multivariate case. If islog is true, then it is assumed that sigma\n        is already log-ed, for example logdetSigma.\n    nobs : int\n        number of observations\n    df_modelwc : int\n        number of parameters including constant\n\n    Returns\n    -------\n    hqic : float\n        information criterion\n\n    Notes\n    -----\n    A constant has been dropped in comparison to the loglikelihood base\n    information criteria. These should only be used to compare comparable\n    models.\n\n    References\n    ----------\n    https://en.wikipedia.org/wiki/Hannan%E2%80%93Quinn_information_criterion\n    \"\"\"\n    if not islog:\n        sigma2 = np.log(sigma2)\n    return sigma2 + hqic(0, nobs, df_modelwc) / nobs\n\n\n# from var_model.py, VAR only? separates neqs and k_vars per equation\n# def fpe_sigma():\n#     ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)\n\n\n# __all__ must contain strings, not the function objects themselves,\n# otherwise ``from statsmodels.tools.eval_measures import *`` raises TypeError\n__all__ = ['maxabs', 'meanabs', 'medianabs', 'medianbias', 'mse', 'rmse',\n           'rmspe', 'stde', 'vare', 'aic', 'aic_sigma', 'aicc', 'aicc_sigma',\n           'bias', 'bic', 'bic_sigma', 'hqic', 'hqic_sigma', 'iqr']\n"
] |
[
[
"numpy.log",
"numpy.abs",
"numpy.asarray",
"numpy.median",
"numpy.squeeze",
"numpy.sort",
"numpy.full_like",
"numpy.std",
"numpy.asanyarray",
"numpy.mean",
"numpy.nanmean",
"numpy.var",
"numpy.array"
]
] |
bernardosabatinilab/face-rhythm
|
[
"ea4b5213827beecc174a0f510574d81346b2f07e"
] |
[
"face_rhythm/util/setup.py"
] |
[
"import cv2\nimport torch\nimport numpy as np\n\nimport yaml\nfrom pathlib import Path\n\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nfrom pynwb import NWBFile, NWBHDF5IO\n\nfrom face_rhythm.util import helpers\n\n\ndef setup_project(project_path, sessions_path, run_name, overwrite_config, remote, trials, multisession):\n \"\"\"\n Creates the project folder and data folder (if they don't exist)\n Creates the config file (if it doesn't exist or overwrite requested)\n Returns path to the config file\n\n Args:\n project_path (Path): path to the project (usually ./)\n sessions_path (Path): path to the session folders and videos\n run_name (str): name for this current run of Face Rhythm\n overwrite_config (bool): whether to overwrite the config\n remote (bool): whether running on remote\n trials (bool): whether using a trial structure for the recordings\n\n Returns:\n config_filepath (str): path to the current config\n \"\"\"\n project_path.mkdir(parents=True, exist_ok=True)\n (project_path / 'configs').mkdir(parents=True, exist_ok=True)\n (project_path / 'data').mkdir(parents=True, exist_ok=True)\n (project_path / 'viz').mkdir(parents=True, exist_ok=True)\n sessions_path.mkdir(parents=True, exist_ok=True)\n config_filepath = project_path / 'configs' / f'config_{run_name}.yaml'\n if not config_filepath.exists() or overwrite_config:\n generate_config(config_filepath, project_path, sessions_path, remote, trials, multisession)\n\n version_check()\n return config_filepath\n\n\ndef version_check():\n \"\"\"\n Checks the versions of various important softwares.\n Prints those versions\n\n Args:\n\n Returns:\n\n \"\"\"\n ### find version of openCV\n # script currently works with v4.4.0\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n print(f'OpenCV version: {major_ver}.{minor_ver}.{subminor_ver}')\n # print(cv2.getBuildInformation())\n\n ### find version of pytorch\n print(f'Pytorch version: {torch.__version__}')\n\n\ndef generate_config(config_filepath, project_path, sessions_path, remote, trials, multisession):\n \"\"\"\n Generates bare config file with just basic info\n\n Args:\n config_filepath (Path): path to config file\n project_path (Path): path to the project (usually ./)\n sessions_path (Path): path to the session folders and videos\n remote (bool): whether running on remote\n trials (bool): whether using a trial structure for the recordings\n multisession (bool): whether we'll be handling multiple sessions\n\n Returns:\n \"\"\"\n\n basic_config = {'General': {},\n 'Video': {},\n 'Paths': {},\n 'ROI': {},\n 'Optic': {},\n 'Clean': {},\n 'CDR': {},\n 'PCA': {},\n 'CQT': {},\n 'TCA': {}}\n basic_config['Paths']['project'] = str(project_path)\n basic_config['Paths']['video'] = str(sessions_path)\n basic_config['Paths']['data'] = str(project_path / 'data')\n basic_config['Paths']['viz'] = str(project_path / 'viz')\n basic_config['Paths']['config'] = str(config_filepath)\n basic_config['General']['remote'] = remote\n basic_config['General']['trials'] = trials\n basic_config['General']['multisession'] = multisession\n\n demo_path = project_path / 'viz' / 'demos'\n demo_path.mkdir(parents=True, exist_ok=True)\n basic_config['Video']['demos'] = str(demo_path)\n positional_path = project_path / 'viz' / 'positional'\n positional_path.mkdir(parents=True, exist_ok=True)\n basic_config['TCA']['dir_positional'] = str(positional_path)\n spectral_path = project_path / 'viz' / 'spectral'\n spectral_path.mkdir(parents=True, exist_ok=True)\n 
basic_config['TCA']['dir_spectral'] = str(spectral_path)\n\n    with open(str(config_filepath), 'w') as f:\n        yaml.safe_dump(basic_config, f)\n\nclass NoFilesError(Exception):\n    \"\"\"Exception raised when no video files matching the pattern are found.\n\n    Attributes:\n        message -- explanation of the error\n    \"\"\"\n\n    def __init__(self, folder, pattern):\n        self.message = f'No files found in {folder} with pattern {pattern}'\n        super().__init__(self.message)\n\nclass NoFoldersError(Exception):\n    \"\"\"Exception raised when no session folders matching the pattern are found.\n\n    Attributes:\n        message -- explanation of the error\n    \"\"\"\n\n    def __init__(self, folder, pattern):\n        self.message = f'No folders found in {folder} with pattern {pattern}'\n        super().__init__(self.message)\n\ndef import_videos(config_filepath):\n    \"\"\"\n    Loop over one folder and find all videos of interest\n\n    Args:\n        config_filepath (Path): path to the config file\n\n    Returns:\n\n    \"\"\"\n\n    config = helpers.load_config(config_filepath)\n    paths = config['Paths']\n    video = config['Video']\n    general = config['General']\n    general['sessions'] = []\n\n    session = {'name': 'session', 'videos': []}\n    for vid in Path(paths['video']).iterdir():\n        if video['file_prefix'] in str(vid.name):\n            if vid.suffix in ['.avi', '.mp4','.mov','.MOV']:\n                session['videos'].append(str(vid))\n            elif vid.suffix in ['.npy'] and general['trials']:\n                session['trial_inds'] = str(vid)\n                trial_inds = np.load(session['trial_inds'])\n                session['num_trials'] = trial_inds.shape[0]\n                session['trial_len'] = trial_inds.shape[1]\n    general['sessions'].append(session)\n    helpers.save_config(config, config_filepath)\n\n    if len(session['videos']) == 0:\n        raise NoFilesError(paths['video'], video['file_prefix'])\n\n\ndef import_videos_multisession(config_filepath):\n    \"\"\"\n    Loop over all sessions and find all videos for each session\n\n    Args:\n        config_filepath (Path): path to the config file\n\n    Returns:\n\n    \"\"\"\n\n    config = helpers.load_config(config_filepath)\n    paths = config['Paths']\n    video = config['Video']\n    general = config['General']\n    general['sessions'] = []\n\n    for path in Path(paths['video']).iterdir():\n        if path.is_dir() and video['session_prefix'] in str(path.name):\n            session = {'name': path.stem, 'videos': []}\n            for vid in path.iterdir():\n                if vid.suffix in ['.avi', '.mp4','.MOV','.mov']:\n                    session['videos'].append(str(vid))\n                elif vid.suffix in ['.npy'] and general['trials']:\n                    session['trial_inds'] = str(vid)\n                    trial_inds = np.load(session['trial_inds'])\n                    session['num_trials'] = trial_inds.shape[0]\n                    session['trial_len'] = trial_inds.shape[1]\n            general['sessions'].append(session)\n    helpers.save_config(config, config_filepath)\n\n    if len(general['sessions']) == 0:\n        raise NoFoldersError(paths['video'], video['session_prefix'])\n\n\n\n\ndef print_session_report(session):\n    \"\"\"\n    Prints a simple report of all the session data\n\n    Args:\n        session (dict): session dictionary\n\n    Returns:\n\n    \"\"\"\n\n    print(f'Current Session: {session[\"name\"]}')\n    print(f'number of videos: {session[\"num_vids\"]}')\n    print(f'number of frames per video (roughly): {session[\"frames_per_video\"]}')\n    print(f'number of frames in ALL videos (roughly): {session[\"frames_total\"]}')\n\n\ndef get_video_data(config_filepath):\n    \"\"\"\n    get info on the imported video(s): num of frames, video height and width, framerate\n\n    Args:\n        config_filepath (Path): path to the config file\n\n    Returns:\n\n    \"\"\"\n    config = 
helpers.load_config(config_filepath)\n    general = config['General']\n    video = config['Video']\n\n    for session in general['sessions']:\n        print(session)\n        session['num_vids'] = len(session['videos'])\n        vid_lens = np.ones(session['num_vids'])\n        for i, vid_path in enumerate(session['videos']):\n            vid_reader = cv2.VideoCapture(vid_path)\n            vid_lens[i] = int(vid_reader.get(cv2.CAP_PROP_FRAME_COUNT))\n        session['vid_lens'] = vid_lens.tolist()\n        session['frames_total'] = int(sum(session['vid_lens']))\n        session['frames_per_video'] = int(session['frames_total'] / session['num_vids'])\n        print_session_report(session)\n\n        if video['print_filenames']:\n            print(f'\\n {np.array(session[\"videos\"]).transpose()}')\n\n    video['Fs'] = vid_reader.get(cv2.CAP_PROP_FPS) ## Sampling rate (FPS). Manually change here if necessary\n    print(f'Sampling rate pulled from video file metadata: {round(video[\"Fs\"], 3)} frames per second')\n\n    vid_reader.set(1, 1)\n    ok, frame = vid_reader.read()\n    video['height'] = frame.shape[0]\n    video['width'] = frame.shape[1]\n\n    helpers.save_config(config, config_filepath)\n\n\ndef create_nwbs(config_filepath):\n    \"\"\"\n    Create one nwb per session. This file will be used for all future data storage\n\n    Args:\n        config_filepath (Path): path to the config file\n\n    Returns:\n\n    \"\"\"\n\n    config = helpers.load_config(config_filepath)\n    general = config['General']\n    paths = config['Paths']\n\n    for session in general['sessions']:\n        session['nwb'] = str(Path(paths['data']) / (session['name']+ '.nwb'))\n        if not general['overwrite_nwbs'] and Path(session['nwb']).exists():\n            print(f'nwb for {session[\"name\"]} already exists, not overwriting')\n            print('set config[\"General\"][\"overwrite_nwbs\"]=True to overwrite it')\n            continue\n\n        nwbfile = NWBFile(session_description=f'face rhythm data',\n                          identifier=f'{session[\"name\"]}',\n                          session_start_time=datetime.now(tzlocal()),\n                          file_create_date=datetime.now(tzlocal()))\n\n        nwbfile.create_processing_module(name='Face Rhythm',\n                                         description='all face rhythm related data')\n\n        with NWBHDF5IO(session['nwb'], 'w') as io:\n            io.write(nwbfile)\n\n    helpers.save_config(config, config_filepath)\n\n\ndef prepare_videos(config_filepath):\n    \"\"\"\n    Collects key video information and stores in the config\n\n    Args:\n        config_filepath (Path): path to the config file\n\n    Returns:\n\n    \"\"\"\n    config = helpers.load_config(config_filepath)\n    if config['General']['multisession']:\n        import_videos_multisession(config_filepath)\n    else:\n        import_videos(config_filepath)\n    get_video_data(config_filepath)\n    create_nwbs(config_filepath)"
] |
[
[
"numpy.load",
"numpy.array",
"numpy.ones"
]
] |
ROCmSoftwarePlatform/jax
|
[
"be34a14dc40384ac8876fad2b23b5e205ccfe22e"
] |
[
"jax/experimental/jax2tf/tests/jax2tf_test.py"
] |
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for JAX2TF converted.\n\nSpecific JAX primitive conversion tests are in primitives_test.\"\"\"\n\nimport re\nfrom typing import Dict, Tuple\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport jax\nfrom jax import dtypes\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import test_util as jtu\nfrom jax.config import config\nfrom jax.experimental import jax2tf\nfrom jax.experimental.jax2tf.tests import tf_test_util\nfrom jax._src import source_info_util\nimport jax._src.lib.xla_bridge\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\nconfig.parse_flags_with_absl()\n\n\nclass Jax2TfTest(tf_test_util.JaxToTfTestCase):\n\n def test_basics(self):\n f_jax = lambda x: jnp.sin(jnp.cos(x))\n _, res_tf = self.ConvertAndCompare(f_jax, 0.7)\n\n def test_input_output_naming(self):\n @jax2tf.convert\n def f(xs, y):\n return [jnp.add(x, y) for x in xs]\n\n @tf.function(autograph=False)\n def u(xs, y):\n xs = tf.nest.map_structure(tf.convert_to_tensor, xs)\n with tf.GradientTape() as tape:\n tf.nest.map_structure(tape.watch, xs)\n y = f(xs, y)\n tape.gradient(y, xs)\n return y\n\n cf = u.get_concrete_function([1., 2., 3.], 4.)\n g = cf.graph\n g.get_operation_by_name(\"jax2tf_arg_0\")\n g.get_operation_by_name(\"jax2tf_arg_1\")\n g.get_operation_by_name(\"jax2tf_arg_2\")\n g.get_operation_by_name(\"jax2tf_arg_3\")\n g.get_operation_by_name(\"jax2tf_out\")\n g.get_operation_by_name(\"jax2tf_out_1\")\n g.get_operation_by_name(\"jax2tf_out_2\")\n with self.assertRaises(KeyError):\n g.get_operation_by_name(\"jax2tf_arg_4\")\n with self.assertRaises(KeyError):\n g.get_operation_by_name(\"jax2tf_out_3\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_arg_0\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_arg_1\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_arg_2\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_arg_3\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_out\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_out_1\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_out_2\")\n g.get_operation_by_name(\"jax2tf_vjp/jax2tf_out_3\")\n\n def test_pytrees(self):\n # Take and return pytrees\n def f_jax(x: Tuple[float, Dict[str, float]]) -> Tuple[float, Dict[str, float]]:\n x_a, x_dict = x\n return x_a * 2., {k: v * 3. 
for k, v in x_dict.items()}\n\n x = (.7, {\"a\": .8, \"b\": .9})\n self.ConvertAndCompare(f_jax, x)\n\n def test_variable_input(self):\n f_jax = lambda x: jnp.sin(jnp.cos(x))\n f_tf = jax2tf.convert(f_jax)\n v = tf.Variable(0.7, dtype=jax2tf.dtype_of_val(0.7))\n self.assertIsInstance(f_tf(v), tf.Tensor)\n self.assertAllClose(f_jax(0.7), f_tf(v))\n\n def test_jit(self):\n f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))\n self.ConvertAndCompare(f_jax, 0.7)\n\n def test_nested_jit(self):\n f_jax = jax.jit(lambda x: jnp.sin(jax.jit(jnp.cos)(x)))\n f_tf = jax2tf.convert(f_jax)\n np.testing.assert_allclose(f_jax(0.7), f_tf(0.7))\n\n def test_converts_jax_arrays(self):\n f_tf = tf.function(lambda x: x)\n self.assertEqual(f_tf(jnp.zeros([])).numpy(), 0.)\n self.assertEqual(f_tf(jnp.ones([])).numpy(), 1.)\n f_tf = tf.function(lambda x: x + x)\n self.assertEqual(f_tf(jnp.ones([])).numpy(), 2.)\n\n # Test with ShardedDeviceArray.\n n = jax.local_device_count()\n mk_sharded = lambda f: jax.pmap(lambda x: x)(f([n]))\n f_tf = tf.function(lambda x: x)\n self.assertAllClose(f_tf(mk_sharded(jnp.zeros)).numpy(),\n np.zeros([n]))\n self.assertAllClose(f_tf(mk_sharded(jnp.ones)).numpy(),\n np.ones([n]))\n\n @jtu.skip_on_devices(\"gpu\")\n def test_bfloat16_passed_by_tf(self):\n f_jax = lambda a, b: a + b\n f_tf = tf.function(jax2tf.convert(f_jax),\n input_signature=[tf.TensorSpec([512, 512], tf.bfloat16),\n tf.TensorSpec([512, 512], tf.bfloat16)])\n self.assertIsNotNone(f_tf.get_concrete_function())\n\n @jtu.skip_on_devices(\"gpu\")\n def test_bfloat16_returned_by_jax(self):\n f_jax = lambda a, b: (a + b).astype(jnp.bfloat16)\n f_tf = jax2tf.convert(f_jax)\n self.assertEqual(f_tf(1., 2.).dtype, tf.bfloat16)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_dtype={dtype.__name__}_function={with_function}\",\n dtype=dtype,\n with_function=with_function)\n for dtype in [np.int64, np.float64]\n for with_function in [True, False]))\n def test_converts_64bit(self, dtype=np.int64, with_function=False):\n if not config.jax_enable_x64:\n self.skipTest(\"requires x64 mode\")\n big_const = np.full((5,), 2 ** 33, dtype=dtype)\n self.ConvertAndCompare(jnp.sin, big_const)\n f_conv = jax2tf.convert(jnp.sin)\n if with_function:\n f_conv = tf.function(f_conv)\n # We check also when we pass tf.Variable or tf.Tensor into the\n # converted function\n self.assertAllClose(jnp.sin(big_const),\n f_conv(tf.Variable(big_const)))\n self.assertAllClose(jnp.sin(big_const),\n f_conv(tf.constant(big_const)))\n\n def test_64bit_behavior_enable_x64(self):\n if not config.jax_enable_x64:\n self.skipTest(\"requires x64 mode\")\n\n # JAX and TF have different default float types if JAX_ENABLE_X64=1\n self.assertEqual(tf.math.sin(0.7).dtype, tf.float32)\n self.assertEqual(jnp.sin(0.7).dtype, jnp.float64)\n\n # jax2tf.convert has the same behavior as JAX\n self.assertEqual(jax2tf.convert(jnp.sin)(0.7).dtype, tf.float64)\n\n def test_64bit_behavior_not_enable_x64(self):\n if config.jax_enable_x64:\n self.skipTest(\"requires not x64 mode\")\n\n # JAX and TF have same default float types if JAX_ENABLE_X64=1\n self.assertEqual(tf.math.sin(0.7).dtype, tf.float32)\n self.assertEqual(jnp.sin(0.7).dtype, jnp.float32)\n\n # Except that JAX forces values to 32-bit\n self.assertEqual(jnp.sin(np.float64(0.7)).dtype, jnp.float32)\n\n # jax2tf.convert has the same behavior as JAX\n self.assertEqual(jax2tf.convert(jnp.sin)(0.7).dtype, tf.float32)\n self.assertEqual(jax2tf.convert(jnp.sin)(np.float64(0.7)).dtype, tf.float32)\n\n 
def test_function(self):\n f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))\n self.ConvertAndCompare(f_jax, 0.7)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_disabled(self, with_function=False):\n f_tf = jax2tf.convert(jnp.tan, with_gradient=False)\n if with_function:\n f_tf = tf.function(f_tf, autograph=False)\n x = tf.ones([])\n\n # With tf.function the error is raised when we evaluate f_tf(x), in\n # eager mode when we evaluate tape.gradient(y, x)\n with self.assertRaisesRegex(LookupError,\n \"Gradient explicitly disabled.*The jax2tf-converted function does not support gradients\"):\n with tf.GradientTape() as tape:\n tape.watch(x)\n y = f_tf(x)\n _ = tape.gradient(y, x)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients(self, with_function=True):\n def f(x, y):\n return x * x, x * y\n f_tf = jax2tf.convert(f, with_gradient=True)\n if with_function:\n f_tf = tf.function(f_tf, autograph=False)\n default_float_type = jax2tf.dtype_of_val(4.)\n x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))\n y = tf.Variable(5., dtype=default_float_type)\n with tf.GradientTape(persistent=True) as tape:\n u, v = f_tf(x, y)\n\n self.assertAllClose(2. * 4., tape.gradient(u, x))\n self.assertAllClose(0., tape.gradient(u, y))\n self.assertAllClose(5., tape.gradient(v, x))\n self.assertAllClose(4., tape.gradient(v, y))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_pytree(self, with_function=True):\n def f(xy: Tuple[float, float]) -> Dict[str, float]:\n x, y = xy\n return dict(one=x * x, two=x * y)\n\n f_tf = jax2tf.convert(f, with_gradient=True)\n if with_function:\n f_tf = tf.function(f_tf, autograph=False)\n default_float_dtype = jax2tf.dtype_of_val(4.)\n x = tf.Variable(4., dtype=default_float_dtype)\n y = tf.Variable(5., dtype=default_float_dtype)\n with tf.GradientTape(persistent=True) as tape:\n uv = f_tf((x, y))\n\n self.assertAllClose(2. * 4., tape.gradient(uv[\"one\"], x))\n self.assertAllClose(0., tape.gradient(uv[\"one\"], y))\n self.assertAllClose(5., tape.gradient(uv[\"two\"], x))\n self.assertAllClose(4., tape.gradient(uv[\"two\"], y))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_with_custom_jvp(self, with_function=True):\n \"\"\"Check gradients, for a function with custom JVP.\"\"\"\n @jax.custom_jvp\n def f(x):\n return x * x\n\n @f.defjvp\n def f_jvp(primals, tangents):\n # 3 * x * x_t\n x, = primals\n x_dot, = tangents\n primal_out = f(x)\n tangent_out = 3. * x * x_dot\n return primal_out, tangent_out\n\n self.assertAllClose(4. * 4., f(4.))\n self.assertAllClose(3. * 4., jax.grad(f)(4.))\n\n f_tf = jax2tf.convert(f, with_gradient=True)\n if with_function:\n f_tf = tf.function(f_tf, autograph=False)\n self.assertAllClose(4. * 4., f_tf(4.))\n x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))\n with tf.GradientTape() as tape:\n tape.watch(x)\n y = f_tf(x)\n\n self.assertAllClose(4. * 4., y)\n self.assertAllClose(3. 
* 4., tape.gradient(y, x))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_with_custom_vjp(self, with_function=True):\n \"\"\"Check gradients, for a function with custom VJP.\"\"\"\n @jax.custom_vjp\n def f(x):\n return x * x\n\n # f_fwd: a -> (b, residual)\n def f_fwd(x):\n return f(x), 3. * x\n # f_bwd: (residual, CT b) -> [CT a]\n def f_bwd(residual, ct_b):\n return residual * ct_b,\n\n f.defvjp(f_fwd, f_bwd)\n\n self.assertAllClose(4. * 4., f(4.))\n self.assertAllClose(3. * 4., jax.grad(f)(4.))\n\n f_tf = jax2tf.convert(f, with_gradient=True)\n if with_function:\n f_tf = tf.function(f_tf, autograph=False)\n self.assertAllClose(4. * 4., f_tf(4.))\n x = tf.Variable(4., dtype=jax2tf.dtype_of_val(4.))\n with tf.GradientTape() as tape:\n tape.watch(x)\n y = f_tf(x)\n\n self.assertAllClose(4. * 4., y)\n self.assertAllClose(3. * 4., tape.gradient(y, x))\n\n def test_gradient_with_float0_intermediate(self):\n # Gradient over integer-argument functions\n def f(x, y): # x is an int, y is a float\n return 2 * x + y\n\n def g(x): # x: f32\n return 2. * f(3 * x.astype(\"int32\"), x * 4.)\n\n x = 2.\n grad_g = jax.grad(g)\n self.ConvertAndCompare(grad_g, x)\n\n def test_gradient_with_float0_result(self):\n # Gradient over integer-argument functions, with float0 result\n def f(x, y): # x is an int, y is a float\n return 2 * x + y\n\n def g(x): # x: i32\n return jnp.sum(2. * f(3 * x, 4. * x.astype(\"float32\")))\n\n grad_g = jax.grad(g, allow_int=True)\n x = 2\n d_dx_jax = grad_g(x)\n d_dx_tf = jax2tf.convert(grad_g)(x)\n self.assertEqual(d_dx_jax.dtype, dtypes.float0)\n self.assertAllClose(jnp.zeros(np.shape(d_dx_jax), np.int32),\n d_dx_tf.numpy())\n\n shape = (3, 4)\n x = np.ones(shape, dtype=np.int32)\n d_dx_jax = grad_g(x)\n d_dx_tf = jax2tf.convert(grad_g)(x)\n self.assertEqual(d_dx_jax.dtype, dtypes.float0)\n self.assertAllClose(jnp.zeros(np.shape(d_dx_jax), np.int32),\n d_dx_tf.numpy())\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_unused_argument_readme(self, with_function=True):\n # x2 and x3 are not used. x3 has integer type.\n def fn(x0, x1, x2, x3):\n return x0 * 0. 
+ x2 * 2.\n\n xs = [tf.Variable(x) for x in [10., 11., 12., 13]]\n with tf.GradientTape(persistent=True) as tape:\n res = fn(*xs)\n\n g_tf_native = tape.gradient(res, xs)\n self.assertAllClose(g_tf_native[0].numpy(), np.float32(0.))\n self.assertIsNone(g_tf_native[1])\n self.assertAllClose(g_tf_native[2].numpy(), np.float32(2.))\n self.assertIsNone(g_tf_native[3])\n\n g_tf_native_0 = tape.gradient(res, xs,\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n self.assertAllClose(g_tf_native_0[0].numpy(), np.float32(0.))\n self.assertAllClose(g_tf_native_0[1].numpy(), np.float32(0.))\n self.assertAllClose(g_tf_native_0[2].numpy(), np.float32(2.))\n self.assertAllClose(g_tf_native_0[3].numpy(), np.int32(0))\n\n # Now with jax2tf.convert\n with tf.GradientTape(persistent=True) as tape:\n conv_fn = jax2tf.convert(fn, with_gradient=True)\n if with_function:\n conv_fn = tf.function(conv_fn, autograph=False)\n res = conv_fn(*xs)\n\n g_jax2tf = tape.gradient(res, xs)\n # Returns: 0., 0., 2., None\n # Note that the gradient for x1 is 0.\n self.assertAllClose(g_jax2tf[0].numpy(), np.float32(0.))\n self.assertAllClose(g_jax2tf[1].numpy(), np.float32(0.))\n self.assertAllClose(g_jax2tf[2].numpy(), np.float32(2.))\n self.assertIsNone(g_jax2tf[3])\n\n g_jax2tf = tape.gradient(res, xs,\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n self.assertAllClose(g_jax2tf[0].numpy(), np.float32(0.))\n self.assertAllClose(g_jax2tf[1].numpy(), np.float32(0.))\n self.assertAllClose(g_jax2tf[2].numpy(), np.float32(2.))\n self.assertAllClose(g_jax2tf[3].numpy(), np.int32(0))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_gradients_int_argument(self, with_function=True):\n # https://github.com/google/jax/issues/6975\n # Also issue #6975.\n # An expanded version of test_gradients_unused_argument\n state = dict(\n float_used=np.array([0.7, 0.9], dtype=np.float32),\n float_passthrough=np.float16(1.),\n float_unused=np.array([1.1, 2.2, 3.3], dtype=np.float32),\n int_used=np.int16(5),\n int_passthrough=np.int8(7),\n int_unused=np.array([1, 2, 3], dtype=np.uint32),\n bool_used=np.array([True, False, False, True], dtype=np.bool_),\n bool_passthrough=np.array([True, False, False, True, False], dtype=np.bool_),\n bool_unused=np.array([[True, False], [False, True]], dtype=np.bool_),\n )\n def jax_f(state):\n res = dict(state,\n float_used=2. 
* state[\"float_used\"],\n int_used=3 * state[\"int_used\"],\n bool_used=(state[\"bool_used\"] == state[\"bool_used\"]))\n del res[\"float_unused\"]\n del res[\"int_unused\"]\n del res[\"bool_unused\"]\n return res\n\n args = (state,)\n res_jax = jax_f(*args)\n # Native JAX AD\n vjp_jax_fun, args_vjp = tf_test_util.TransformJaxVJP(jax_f, args, res_jax)\n grad_jax, = vjp_jax_fun(*args_vjp)\n\n def compare_with_overrides(*, what, expected, **expected_overrides):\n what_keys = set(what.keys())\n expected_keys = set(expected.keys())\n self.assertEqual(what_keys, expected_keys)\n for k, w in what.items():\n e = expected[k]\n if k in expected_overrides:\n if expected_overrides[k] == \"ZERO\":\n e = np.zeros_like(w)\n elif expected_overrides[k] == \"ZERO_INT32\":\n e = np.zeros(np.shape(w), dtype=np.int32)\n elif expected_overrides[k] == \"ONE\":\n e = np.ones_like(w)\n else:\n e = expected_overrides[k]\n\n if e is None:\n self.assertIsNone(w, msg=k)\n else:\n self.assertIsNotNone(w, msg=k)\n w = w.numpy() if isinstance(w, tf.Tensor) else e\n e = e.numpy() if isinstance(e, tf.Tensor) else e\n try:\n self.assertAllClose(e, w, err_msg=k)\n except:\n print(f\"Failed at {k}\")\n raise\n\n\n # compare_with_overrides(g_jax, {},\n # bool_passthrough=np.zeros(state[\"bool_passthrough\"].shape, dtype=dtypes.float0),\n # bool_unused=np.zeros(state[\"bool_unused\"].shape, dtype=dtypes.float0),\n # bool_used=np.zeros(state[\"bool_used\"].shape, dtype=dtypes.float0),\n # float_passthrough=np.ones_like(state[\"float_passthrough\"]),\n # float_unused=np.zeros_like(state[\"float_unused\"]),\n # float_used=np.ones_like(state[\"float_used\"]) * np.array(2., dtype=state[\"float_used\"].dtype),\n # int_passthrough=np.zeros(state[\"int_passthrough\"].shape, dtype=dtypes.float0),\n # int_unused=np.zeros(state[\"int_unused\"].shape, dtype=dtypes.float0),\n # int_used=np.zeros(state[\"int_used\"].shape, dtype=dtypes.float0))\n\n\n # Now native TF gradients, only to test how native TF AD works\n _, (grad_tf_0,) = tf_test_util.ComputeTfValueAndGrad(\n jax_f, args, unconnected_gradients=tf.UnconnectedGradients.ZERO)\n compare_with_overrides(what=grad_tf_0,\n expected=grad_jax,\n float_unused=\"ZERO\",\n bool_used=\"ZERO\", bool_passthrough=\"ONE\", bool_unused=\"ZERO\",\n int_used=\"ZERO\", int_passthrough=\"ONE\", int_unused=\"ZERO\")\n\n _, (grad_tf_None,) = tf_test_util.ComputeTfValueAndGrad(\n jax_f, args,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n compare_with_overrides(what=grad_tf_None,\n expected=grad_tf_0,\n float_unused=None, int_used=None, int_unused=None,\n bool_used=None, bool_unused=None)\n\n f_tf_jax = jax2tf.convert(jax_f)\n if with_function:\n f_tf_jax = tf.function(f_tf_jax, autograph=False)\n\n _, (grad_tf_jax_0,) = tf_test_util.ComputeTfValueAndGrad(f_tf_jax, args)\n # Same results as TF native AD with tf.UnconnectedGradients.ZERO\n compare_with_overrides(what=grad_tf_jax_0,\n expected=grad_tf_0,\n int_passthrough=\"ZERO\", bool_passthrough=\"ZERO\")\n\n _, (grad_tf_jax_None,) = tf_test_util.ComputeTfValueAndGrad(\n f_tf_jax, args,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n compare_with_overrides(what=grad_tf_jax_None,\n expected=grad_tf_0,\n int_used=None, int_passthrough=None, int_unused=None,\n bool_unused=None, bool_used=None, bool_passthrough=None)\n\n # Not convert the JAX gradient function\n tf_vjp_jax_fun = jax2tf.convert(vjp_jax_fun)\n grad_tf_vjp_jax, = tf_vjp_jax_fun(*args_vjp)\n compare_with_overrides(what=grad_tf_vjp_jax,\n expected=grad_tf_0,\n 
bool_passthrough=\"ZERO_INT32\",\n bool_unused=\"ZERO_INT32\", bool_used=\"ZERO_INT32\",\n int_passthrough=\"ZERO_INT32\", int_unused=\"ZERO_INT32\",\n int_used=\"ZERO_INT32\")\n\n def test_readme_gradient_int(self):\n x = np.array(2, dtype=np.int16)\n\n def f_jax(x): # x: int16\n return x.astype(np.float32) * 2.\n\n print(jax.grad(f_jax, allow_int=True)(x))\n # returns a special `float0`: array((b'',), dtype=[('float0', 'V')])\n\n print(jax2tf.convert(jax.grad(f_jax, allow_int=True))(x))\n # returns a 0 with same shape as x, but with dtype int32\n\n def f_tf(x): # x: int16\n return tf.cast(x, tf.float32) * 2.\n\n xv = tf.Variable(x)\n with tf.GradientTape(persistent=True) as tape:\n print(tape.gradient(f_tf(xv), xv))\n # returns None\n print(tape.gradient(f_tf(xv), xv,\n unconnected_gradients=tf.UnconnectedGradients.ZERO))\n # returns 0 with the same shape and dtype as x\n\n\n def test_convert_argument_non_callable_error(self):\n with self.assertRaisesRegex(TypeError, \"Expected a callable value\"):\n jax2tf.convert(5.)\n\n def test_convert_argument_non_tensor_error(self):\n with self.assertRaisesRegex(TypeError,\n \"Argument.*should be NumPy array\"):\n jax2tf.convert(lambda x: x)(lambda y: y)\n\n def test_argument_eager_tensor(self):\n x = jax2tf.convert(jnp.sin)(1.)\n jax2tf.convert(jnp.cos)(x) # No error\n\n def test_checkpoint_wrapper_types(self):\n m = tf.Module()\n m.a = [tf.Module(), tf.Module()]\n m.b = (tf.Module(), tf.Module())\n m.c = {'a': tf.Module(), 'b': tf.Module()}\n self.assertNotEqual(type(m.a), list)\n self.assertNotEqual(type(m.b), tuple)\n self.assertNotEqual(type(m.c), dict)\n self.assertLen(jax.tree_leaves(m.a), 2)\n self.assertLen(jax.tree_leaves(m.b), 2)\n self.assertLen(jax.tree_leaves(m.c), 2)\n\n def test_custom_jvp(self):\n \"\"\"Conversion of function with custom JVP\"\"\"\n\n @jax.custom_jvp\n def f(x):\n return x * x\n\n @f.defjvp\n def f_jvp(primals, tangents):\n x, = primals\n x_dot, = tangents\n primal_out = f(x)\n tangent_out = 3. * x * x_dot\n return primal_out, tangent_out\n\n arg = 0.7\n self.TransformConvertAndCompare(f, arg, None)\n self.TransformConvertAndCompare(f, arg, \"jvp\")\n self.TransformConvertAndCompare(f, arg, \"vmap\")\n self.TransformConvertAndCompare(f, arg, \"jvp_vmap\")\n self.TransformConvertAndCompare(f, arg, \"grad\")\n self.TransformConvertAndCompare(f, arg, \"grad_vmap\")\n\n def test_custom_vjp(self):\n \"\"\"Conversion of function with custom VJP\"\"\"\n\n @jax.custom_vjp\n def f(x):\n return x * x\n\n # f_fwd: a -> (b, residual)\n def f_fwd(x):\n return f(x), 3. 
* x\n\n # f_bwd: (residual, CT b) -> [CT a]\n def f_bwd(residual, ct_b):\n return residual * ct_b,\n\n f.defvjp(f_fwd, f_bwd)\n arg = 0.7\n self.TransformConvertAndCompare(f, arg, None)\n self.TransformConvertAndCompare(f, arg, \"vmap\")\n self.TransformConvertAndCompare(f, arg, \"grad\")\n self.TransformConvertAndCompare(f, arg, \"grad_vmap\")\n\n def test_remat1(self):\n @jax.remat\n def f(x1):\n x2 = jnp.sin(x1)\n x3 = jnp.sin(x2)\n x4 = jnp.sin(x3)\n return jnp.sum(x4)\n\n # The computation of grad_f computes \"sin\" 5 times, 3 for the forward pass\n # and then to rematerialize \"x2\" and \"x3\" in the backward pass.\n arg = np.arange(3.)\n self.TransformConvertAndCompare(f, arg, \"grad\")\n # TODO: check that the TF code also computes \"sin\" 5 times\n\n def test_remat_free_var(self):\n def f(x):\n y = 2 * x\n\n @jax.remat\n def g():\n return y\n\n return g()\n arg = 3.\n self.TransformConvertAndCompare(f, arg, None)\n self.TransformConvertAndCompare(f, arg, \"grad\")\n\n def test_convert_nullary_func(self):\n # Even nullary functions are converted to TF (as opposed to constant-folded\n # in JAX prior to conversion).\n def f_jax():\n return jnp.sin(1.)\n f_tf = tf.function(jax2tf.convert(f_jax), autograph=False)\n f_tf_graph = f_tf.get_concrete_function().graph.as_graph_def()\n self.assertIn('op: \"Sin\"', str(f_tf_graph))\n\n def test_convert_of_nested_independent_jit(self):\n def func(x):\n def inner1(y):\n return x + y\n # The JIT does not have data dependency\n return jax.jit(inner1)(1.)\n\n jax2tf.convert(func)(2.)\n\n def test_convert_of_nested_dependent_jit(self):\n def func(x):\n def inner1(y):\n return x + y\n # The JIT does have data dependency\n return jax.jit(inner1)(x)\n\n jax2tf.convert(func)(2.) # No error\n\n def test_nested_convert_error(self):\n def outer(y):\n return jax2tf.convert(jnp.sin)(y) # Inner convert takes tracer args\n with self.assertRaisesRegex(\n ValueError, \"convert must be used outside all JAX transformations\"):\n jax2tf.convert(outer)(np.ones((4, )))\n\n def test_nested_convert_error_non_tracer(self):\n \"\"\"The inner convert takes non-tracer arguments\"\"\"\n def outer(y):\n sin_1 = jax2tf.convert(jnp.sin)(1.) # Inner convert takes non-tracer arg\n return y + sin_1\n\n with self.assertRaisesRegex(\n ValueError, \"convert must be used outside all JAX transformations\"):\n jax2tf.convert(outer)(2.)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{transform}\", transform=transform)\n for transform in [\"jit\", \"jvp\", \"grad\", \"vmap\"]))\n def test_convert_under_transform_error(self, transform=\"vmap\"):\n def outer(y):\n return jax2tf.convert(jnp.sin)(y) # Inner convert takes tracer args\n\n with self.assertRaisesRegex(\n ValueError, \"convert must be used outside all JAX transformations\"):\n self.TransformConvertAndCompare(outer, np.ones((4,)), transform)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{transform}\", transform=transform)\n for transform in [\"jit\", \"jvp\", \"grad\", \"vmap\"]))\n def test_convert_under_transform_error_non_tracer(self, transform=\"vmap\"):\n def outer(y):\n sin_1 = jax2tf.convert(jnp.sin)(1.) 
# Inner convert takes non-tracer arg\n return y + sin_1\n\n with self.assertRaisesRegex(\n ValueError, \"convert must be used outside all JAX transformations\"):\n self.TransformConvertAndCompare(outer, np.ones((4,)), transform)\n\n def test_name_scope(self):\n log = []\n\n @jax.named_call\n def my_test_function(x):\n y = tf.Variable(1., name=\"foo\")\n log.append(y.name)\n return x * x\n\n def caller(x):\n return my_test_function(jnp.sin(x))\n\n jax2tf.convert(caller)(2.)\n self.assertIn(\"my_test_function/foo\", log[0])\n\n def test_bfloat16_constant(self):\n # Re: https://github.com/google/jax/issues/3942\n def jax_fn_scalar(x):\n x = x.astype(jnp.bfloat16)\n x *= 2.\n return x\n\n def jax_fn_array(x):\n x = x.astype(jnp.bfloat16)\n x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)\n return x\n\n tf_fn_scalar = jax2tf.convert(jax_fn_scalar)\n self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))\n\n tf_fn_array = jax2tf.convert(jax_fn_array)\n self.assertAllClose(\n tf_fn_array(np.array([3, 4, 5])), np.array([4.5, 10, 17.5],\n jnp.bfloat16))\n\n def test_shared_constants(self):\n # Check that the constants are shared properly in converted functions\n # See https://github.com/google/jax/issues/7992.\n const = np.ones((16, 16))\n def f(x):\n return x + const + const + const + const\n\n f_tf_graph = tf.function(jax2tf.convert(f), autograph=False).get_concrete_function(const).graph.as_graph_def()\n f_tf_graph_nr_consts = len(re.findall(r'op:\\s*\"Const\"', str(f_tf_graph)))\n # It seems that there is already a shape constant in the graph, we want to\n # make sure our 4 instances of \"const\" are shared.\n self.assertEqual(f_tf_graph_nr_consts, 2)\n\n def test_weak_types(self):\n mul = jax.jit(jnp.multiply)\n # The value `2` here should be weakly typed, and should not lead to\n # promotion.\n tf_fn = jax2tf.convert(lambda x: mul(x, 2.))\n self.assertAllClose(tf_fn(tf.constant(1.375, tf.bfloat16)).numpy(),\n jnp.bfloat16(2.750))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_kwargs(self, with_function=True):\n # Re: https://github.com/google/jax/issues/6791\n def f_jax(*, x):\n return jnp.sum(x)\n f_tf = jax2tf.convert(f_jax)\n if with_function:\n f_tf = tf.function(f_tf)\n self.assertAllClose(\n f_tf(x=np.zeros(3, dtype=np.float32)), # Call with kwargs.\n np.zeros((), dtype=np.float32))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"function={with_function}\",\n with_function=with_function)\n for with_function in [False, True]))\n def test_grad_kwargs(self, with_function=False):\n # Re: https://github.com/google/jax/issues/6791\n x = (np.zeros(3, dtype=np.float32),\n np.zeros(4, dtype=np.float32))\n def f_jax(*, x=(1., 2.)):\n return jnp.sum(x[0]) + 2. 
* jnp.sum(x[1])\n f_tf = jax2tf.convert(f_jax)\n if with_function:\n f_tf = tf.function(f_tf)\n xv = tf.nest.map_structure(tf.Variable, x)\n with tf.GradientTape() as tape:\n res = f_tf(x=xv)\n grad_tf = tape.gradient(res, xv)\n self.assertAllClose((np.full_like(x[0], fill_value=1.),\n np.full_like(x[1], fill_value=2.)),\n (grad_tf[0].numpy(), grad_tf[1].numpy()))\n\n\n def test_enable_xla(self):\n # Tests that enable_xla flag is properly scoped to a conversion.\n def fun(x):\n # lax.reduce is unlikely to ever be convertible with enable_xla=False\n return lax.reduce(x, np.float32(0), lambda v, acc: v + acc, dimensions=(0, 1))\n\n tf_fun_with_xla = jax2tf.convert(fun, enable_xla=True)\n tf_fun_without_xla = jax2tf.convert(fun, enable_xla=False)\n x = np.ones((2, 3), dtype=np.float32)\n\n self.assertAllClose(fun(x), tf_fun_with_xla(x))\n with self.assertRaisesRegex(NotImplementedError,\n \"Call to reduce cannot be converted with enable_xla=False\"):\n tf_fun_without_xla(x)\n\n # Now in reverse order (we had bugs with the management of enable_xla global)\n tf_fun2_without_xla = jax2tf.convert(lambda x: fun(x), enable_xla=False)\n tf_fun2_with_xla = jax2tf.convert(lambda x: fun(x), enable_xla=True)\n\n with self.assertRaisesRegex(NotImplementedError,\n \"Call to reduce cannot be converted with enable_xla=False\"):\n tf_fun2_without_xla(x)\n self.assertAllClose(fun(x), tf_fun2_with_xla(x))\n\n def test_device_array_arg(self):\n self.ConvertAndCompare(jnp.sin, jnp.zeros((2, 3), jnp.float32))\n\n def test_randint(self):\n def randint():\n return jax.random.randint(\n jax.random.PRNGKey(42), shape=(), minval=0, maxval=1)\n\n self.ConvertAndCompare(randint)\n\n def test_op_metadata_simple(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n # A simple example\n # The user_frame is used to compute line numbers for ops in the test.\n user_frame = source_info_util.user_frame(source_info_util.current())\n def f_simple(x):\n return jnp.sin(x)\n\n x = np.ones((2, 3), np.float32)\n self.CheckOpMetadata(\n f_simple, x,\n [tf_test_util.OpMetadataGraph(tf_type=\"Sin\",\n source_file=__file__,\n source_line=user_frame.line_num + 2,\n op_name=\"jax2tf(f_simple)/sin\",\n op_type=\"sin\")\n ]\n )\n\n def test_op_metadata_sub_jit(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n # Calling a jitted-function\n # The user_frame is used to compute line numbers for ops in the test.\n user_frame = source_info_util.user_frame(source_info_util.current())\n def f_callee(x):\n return jnp.cos(x)\n def f_caller(x):\n y = jnp.tanh(x)\n z = jax.jit(f_callee)(y)\n return jnp.sin(z)\n\n x = np.ones((2, 3), np.float32)\n\n self.CheckOpMetadata(\n f_caller, x,\n [tf_test_util.OpMetadataGraph(tf_type=\"Tanh\",\n source_file=__file__,\n source_line=user_frame.line_num + 4,\n op_name=\"jax2tf(f_caller)/tanh\",\n op_type=\"tanh\"),\n tf_test_util.OpMetadataGraph(tf_type=\"Cos\",\n source_file=__file__,\n source_line=user_frame.line_num + 2,\n op_name=\"jax2tf(f_caller)/jit(f_callee)/cos\",\n op_type=\"cos\"),\n tf_test_util.OpMetadataGraph(tf_type=\"Sin\",\n source_file=__file__,\n source_line=user_frame.line_num + 6,\n op_name=\"jax2tf(f_caller)/sin\",\n op_type=\"sin\"),\n ]\n )\n\n def test_op_metadata_named(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n # Calling a jax.named_call\n # The user_frame is used to compute line numbers for ops in the test.\n user_frame = source_info_util.user_frame(source_info_util.current())\n def f_callee(x):\n return jnp.cos(x)\n def 
f_caller(x):\n y = jnp.tanh(x)\n z = jax.named_call(f_callee, name=\"callee\")(y)\n return jnp.sin(z)\n\n x = np.ones((2, 3), np.float32)\n\n self.CheckOpMetadata(\n f_caller, x,\n [tf_test_util.OpMetadataGraph(tf_type=\"Tanh\",\n source_file=__file__,\n source_line=user_frame.line_num + 4,\n op_name=\"jax2tf(f_caller)/tanh\",\n op_type=\"tanh\"),\n tf_test_util.OpMetadataGraph(tf_type=\"Cos\",\n source_file=__file__,\n source_line=user_frame.line_num + 2,\n op_name=\"jax2tf(f_caller)/named(callee)/cos\",\n op_type=\"cos\"),\n tf_test_util.OpMetadataGraph(tf_type=\"Sin\",\n source_file=__file__,\n source_line=user_frame.line_num + 6,\n op_name=\"jax2tf(f_caller)/sin\",\n op_type=\"sin\"),\n ]\n )\n\n def test_op_metadata_while_and_cond(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n # An example with while and cond\n # The user_frame is used to compute line numbers for ops in the test.\n user_frame = source_info_util.user_frame(source_info_util.current())\n def f_while_cond(x):\n def body_fun(i_acc):\n i, acc = i_acc\n return (i + 1,\n (jnp.cos(acc) +\n lax.cond(jnp.mod(i, 2) == 0,\n lambda acc: jnp.sin(acc),\n lambda acc: acc,\n acc)))\n\n _, acc = lax.while_loop(\n lambda i_acc: i_acc[0] <= 5,\n body_fun, (0, x))\n return acc\n\n x = np.ones((2, 3), np.float32)\n self.CheckOpMetadata(\n f_while_cond, x,\n [tf_test_util.OpMetadataGraph(tf_type=\"Cos\",\n source_file=__file__,\n source_line=user_frame.line_num + 5,\n op_name=\"jax2tf(f_while_cond)/while/body/cos\",\n op_type=\"cos\"),\n tf_test_util.OpMetadataGraph(tf_type=\"Sin\",\n source_file=__file__,\n source_line=user_frame.line_num + 7,\n op_name=\"jax2tf(f_while_cond)/while/body/branch_1_fun/sin\",\n op_type=\"sin\"),\n tf_test_util.OpMetadataGraph(tf_type=\"FloorMod\",\n source_file=__file__,\n source_line=user_frame.line_num + 6,\n op_name=\"jax2tf(f_while_cond)/while/body/rem\",\n op_type=\"rem\"),\n ]\n )\n\n def test_op_metadata_batched_while(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n # An example with while and cond\n # The user_frame is used to compute line numbers for ops in the test.\n user_frame = source_info_util.user_frame(source_info_util.current())\n @jax.vmap\n def f_while(x):\n def body_fun(carry):\n new_carry = jnp.sin(carry) # We look for \"sin\" in the graph\n return new_carry\n\n _, carry = lax.while_loop(\n lambda carry: jnp.all(carry <= x), # We look for \"le\" in the graph\n body_fun, x)\n return carry\n\n shape = (3, 2)\n x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)\n\n jax_comp = jax.xla_computation(f_while)(x)\n backend = jax._src.lib.xla_bridge.get_backend()\n modules = backend.compile(jax_comp).hlo_modules()\n jax_opt_hlo = modules[0].to_string()\n print(f\"JAX OPT HLO = {jax_opt_hlo}\")\n\n self.CheckOpMetadata(\n f_while, x,\n [tf_test_util.OpMetadataGraph(tf_type=\"Sin\",\n source_file=__file__,\n source_line=user_frame.line_num + 4,\n op_name=\"jax2tf(f_while)/while/body/sin\",\n op_type=\"sin\"),\n tf_test_util.OpMetadataGraph(tf_type=\"LessEqual\",\n source_file=__file__,\n source_line=user_frame.line_num + 8,\n op_name=\"jax2tf(f_while)/while/body_pred/le\",\n op_type=\"le\"),\n ]\n )\n\n def test_op_metadata_disabled(self):\n self.skipTest(\"include_xla_op_metadata not yet enabled\")\n def f_simple(x):\n return jnp.sin(x)\n\n x = np.ones((2, 3), np.float32)\n self.CheckOpMetadata(\n f_simple, x,\n [],\n include_xla_op_metadata=False\n )\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] |
[
[
"tensorflow.cast",
"numpy.zeros_like",
"numpy.ones_like",
"tensorflow.Variable",
"numpy.arange",
"numpy.float16",
"numpy.int8",
"numpy.full",
"tensorflow.math.sin",
"numpy.float32",
"numpy.zeros",
"numpy.full_like",
"tensorflow.function",
"numpy.array",
"tensorflow.GradientTape",
"tensorflow.constant",
"numpy.int32",
"tensorflow.ones",
"numpy.ones",
"numpy.int16",
"tensorflow.Module",
"numpy.shape",
"numpy.float64",
"numpy.prod",
"tensorflow.TensorSpec",
"tensorflow.nest.map_structure"
]
] |
tmoer/a0c
|
[
"62c4aa793205294d5b3a99d192e9b6311f4d34a6"
] |
[
"src/rl/atariwrapper.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nAtari wrapper, based on https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py\n@author: thomas\n\"\"\"\nimport gym\nfrom gym import spaces\nfrom collections import deque\nimport numpy as np\nfrom PIL import Image\n\nclass ClipRewardWrapper(gym.RewardWrapper):\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return 0.5 * np.sign(reward)\n\nclass AtariWrapper(gym.Wrapper):\n ''' Chain domain '''\n \n def __init__(self, env, skip=4, k=4,ram=False):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # Frame skip and pooling\n self._obs_buffer = deque(maxlen=skip)\n self._skip = skip \n self._ram = ram\n\n # Frame stacking\n self.k = k\n self.frames = deque([], maxlen=k)\n \n # Frame wrapping\n if not self._ram:\n self.res = 84\n self.observation_space = spaces.Box(low=0, high=1, shape=(self.res,self.res, k))\n else:\n self.res = env.observation_space.shape[0]\n self.observation_space = spaces.Box(low=0, high=1, shape=(self.res, k))\n\n def _observation(self):\n assert len(self.frames) == self.k\n return np.concatenate(self.frames, axis=-1) \n\n def _resize(self, obs):\n if not self._ram:\n frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))\n frame = np.array(Image.fromarray(frame).resize((self.res, self.res),\n resample=Image.BILINEAR), dtype=np.float32)/255.0\n return frame.reshape((self.res, self.res, 1))\n else:\n obs = obs/255\n return obs.astype('float32').reshape((self.res,1))\n \n def _reset(self):\n \"\"\"Clear buffers and re-fill by duplicating the first observation.\"\"\"\n ob = self.env.reset()\n ob = self._resize(ob)\n for _ in range(self.k): self.frames.append(ob)\n self._obs_buffer.clear()\n for _ in range(self._skip): self._obs_buffer.append(ob)\n return self._observation()\n \n def _step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n obs = self._resize(obs)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n break\n if not self._ram:\n max_frame = np.max(np.stack(self._obs_buffer), axis=0) # max over skips\n else:\n max_frame = obs # just take the last, max has no interpretation\n self.frames.append(max_frame) # append to buffer\n return self._observation(), total_reward, done, info\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.stack",
"numpy.sign"
]
] |
sjuvekar/neon
|
[
"abe5d30a68663c739a97a9e657516d530c66dbd9"
] |
[
"tests/test_merge_layer.py"
] |
[
"# ----------------------------------------------------------------------------\n# Copyright 2015 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n'''\nTest of the merge layer with linear layers\n'''\nimport itertools as itt\nimport numpy as np\n\nfrom neon import NervanaObject\nfrom neon.initializers.initializer import Uniform\nfrom neon.layers import Affine, MergeMultistream, Sequential\n\n\ndef pytest_generate_tests(metafunc):\n fargs = []\n eps = np.finfo(np.float32).eps\n # weight ranges\n w_rng = [[-1.0, 1.0]]\n rng_max = [eps]\n fargs = itt.product(w_rng, rng_max)\n metafunc.parametrize('allrand_args', fargs)\n\n\ndef test_concat_l1_l1(backend_default, allrand_args):\n # test two linear layers that are merged with concat\n dtypeu = np.float32\n w_rng, rngmax = allrand_args\n # Diff size inputs and outputs\n nins = [128, 1024]\n nouts = [64, 2048]\n batch_size = 16\n NervanaObject.be.bsz = NervanaObject.be.bs = batch_size\n be = NervanaObject.be\n\n init_unif = Uniform(low=w_rng[0], high=w_rng[1])\n layers = [Sequential(Affine(nout=nout, init=init_unif)) for nout in nouts]\n inputs = [be.array(dtypeu(np.random.random((nin, batch_size)))) for nin in nins]\n merge = MergeMultistream(layers, merge=\"stack\")\n assert(len(inputs) == len(layers))\n merge.configure(inputs)\n merge.allocate()\n merge.set_deltas(None)\n out = merge.fprop(inputs).asnumpyarray()\n\n sublayers = [s.layers[0] for s in layers]\n weights = [layer.W.asnumpyarray() for layer in sublayers]\n out_exp = np.concatenate([np.dot(w, inp.get()) for (w, inp) in zip(weights, inputs)])\n\n assert np.allclose(out, out_exp, atol=1e-3)\n\n err_lst = [dtypeu(np.random.random((nout, batch_size))) for nout in nouts]\n err_concat = np.concatenate(err_lst)\n merge.bprop(be.array(err_concat))\n dW_exp_lst = [np.dot(err, inp.asnumpyarray().T) for (err, inp) in zip(err_lst, inputs)]\n\n for layer, dW_exp in zip(sublayers, dW_exp_lst):\n assert np.allclose(layer.dW.asnumpyarray(), dW_exp)\n return\n\n\ndef test_concat_sequence_l1_l1(backend_default, allrand_args):\n # test two linear layers that are merged with concat\n dtypeu = np.float32\n w_rng, rngmax = allrand_args\n # Diff size input steps\n nin = 128\n steps = [32, 64]\n nout = 256\n batch_size = 16\n NervanaObject.be.bsz = NervanaObject.be.bs = batch_size\n be = NervanaObject.be\n\n init_unif = Uniform(low=w_rng[0], high=w_rng[1])\n layers = [Sequential(Affine(nout=nout, init=init_unif)) for _ in range(2)]\n inputs = [be.array(dtypeu(np.random.random((nin, batch_size*step))))\n for step in steps]\n merge = MergeMultistream(layers, merge=\"recurrent\")\n assert(len(inputs) == len(layers))\n merge.configure(inputs)\n merge.allocate()\n merge.set_deltas(None)\n out = merge.fprop(inputs).asnumpyarray()\n\n sublayers = [s.layers[0] for s in layers]\n weights = [layer.W.asnumpyarray() for layer in sublayers]\n out_exp = np.concatenate([np.dot(w, inp.get()) for (w, inp) in zip(weights, 
inputs)], axis=1)\n\n assert np.allclose(out, out_exp, atol=1e-3)\n\n err_lst = [dtypeu(np.random.random((nout, batch_size*step))) for step in steps]\n err_concat = be.array(np.concatenate(err_lst, axis=1))\n merge.bprop(err_concat)\n dW_exp_lst = [np.dot(err, inp.asnumpyarray().T) for (err, inp) in zip(err_lst, inputs)]\n\n for layer, dW_exp in zip(sublayers, dW_exp_lst):\n assert np.allclose(layer.dW.asnumpyarray(), dW_exp)\n return\n"
] |
[
[
"numpy.concatenate",
"numpy.random.random",
"numpy.allclose",
"numpy.finfo"
]
] |
LaudateCorpus1/audio
|
[
"a007e922d34028270197c0549bf452b79499d039"
] |
[
"test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py"
] |
[
"import torch\nfrom torchaudio.models import emformer_rnnt_model\nfrom torchaudio_unittest.common_utils import TestBaseMixin, torch_script\n\n\nclass RNNTTestImpl(TestBaseMixin):\n def _get_input_config(self):\n model_config = self._get_model_config()\n return {\n \"batch_size\": 8,\n \"max_input_length\": 61,\n \"num_symbols\": model_config[\"num_symbols\"],\n \"max_target_length\": 23,\n \"input_dim\": model_config[\"input_dim\"],\n \"right_context_length\": model_config[\"right_context_length\"],\n \"encoding_dim\": model_config[\"encoding_dim\"],\n \"joiner_max_input_length\": 61 // model_config[\"time_reduction_stride\"],\n \"segment_length\": model_config[\"segment_length\"],\n \"time_reduction_stride\": model_config[\"time_reduction_stride\"],\n }\n\n def _get_model_config(self):\n return {\n \"input_dim\": 80,\n \"encoding_dim\": 128,\n \"num_symbols\": 256,\n \"segment_length\": 16,\n \"right_context_length\": 4,\n \"time_reduction_input_dim\": 128,\n \"time_reduction_stride\": 4,\n \"transformer_num_heads\": 4,\n \"transformer_ffn_dim\": 64,\n \"transformer_num_layers\": 3,\n \"transformer_dropout\": 0.0,\n \"transformer_activation\": \"relu\",\n \"transformer_left_context_length\": 30,\n \"transformer_max_memory_size\": 0,\n \"transformer_weight_init_scale_strategy\": \"depthwise\",\n \"transformer_tanh_on_mem\": True,\n \"symbol_embedding_dim\": 64,\n \"num_lstm_layers\": 2,\n \"lstm_layer_norm\": True,\n \"lstm_layer_norm_epsilon\": 1e-3,\n \"lstm_dropout\": 0.0,\n }\n\n def _get_model(self):\n return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval()\n\n def _get_transcriber_input(self):\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n max_input_length = input_config[\"max_input_length\"]\n input_dim = input_config[\"input_dim\"]\n right_context_length = input_config[\"right_context_length\"]\n\n torch.random.manual_seed(31)\n input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(\n device=self.device, dtype=self.dtype\n )\n lengths = torch.randint(1, max_input_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)\n return input, lengths\n\n def _get_transcriber_streaming_input(self):\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n segment_length = input_config[\"segment_length\"]\n input_dim = input_config[\"input_dim\"]\n right_context_length = input_config[\"right_context_length\"]\n\n torch.random.manual_seed(31)\n input = torch.rand(batch_size, segment_length + right_context_length, input_dim).to(\n device=self.device, dtype=self.dtype\n )\n lengths = torch.randint(1, segment_length + right_context_length + 1, (batch_size,)).to(\n device=self.device, dtype=torch.int32\n )\n return input, lengths\n\n def _get_predictor_input(self):\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n num_symbols = input_config[\"num_symbols\"]\n max_target_length = input_config[\"max_target_length\"]\n\n torch.random.manual_seed(31)\n input = torch.randint(0, num_symbols, (batch_size, max_target_length)).to(device=self.device, dtype=torch.int32)\n lengths = torch.randint(1, max_target_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)\n return input, lengths\n\n def _get_joiner_input(self):\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n joiner_max_input_length = input_config[\"joiner_max_input_length\"]\n max_target_length = 
input_config[\"max_target_length\"]\n input_dim = input_config[\"encoding_dim\"]\n\n torch.random.manual_seed(31)\n utterance_encodings = torch.rand(batch_size, joiner_max_input_length, input_dim).to(\n device=self.device, dtype=self.dtype\n )\n utterance_lengths = torch.randint(0, joiner_max_input_length + 1, (batch_size,)).to(\n device=self.device, dtype=torch.int32\n )\n target_encodings = torch.rand(batch_size, max_target_length, input_dim).to(device=self.device, dtype=self.dtype)\n target_lengths = torch.randint(0, max_target_length + 1, (batch_size,)).to(\n device=self.device, dtype=torch.int32\n )\n return utterance_encodings, utterance_lengths, target_encodings, target_lengths\n\n def test_torchscript_consistency_forward(self):\n r\"\"\"Verify that scripting RNNT does not change the behavior of method `forward`.\"\"\"\n inputs, input_lengths = self._get_transcriber_input()\n targets, target_lengths = self._get_predictor_input()\n\n rnnt = self._get_model()\n scripted = torch_script(rnnt).eval()\n\n ref_state, scripted_state = None, None\n for _ in range(2):\n ref_out, ref_input_lengths, ref_target_lengths, ref_state = rnnt(\n inputs, input_lengths, targets, target_lengths, ref_state\n )\n (\n scripted_out,\n scripted_input_lengths,\n scripted_target_lengths,\n scripted_state,\n ) = scripted(inputs, input_lengths, targets, target_lengths, scripted_state)\n\n self.assertEqual(ref_out, scripted_out)\n self.assertEqual(ref_input_lengths, scripted_input_lengths)\n self.assertEqual(ref_target_lengths, scripted_target_lengths)\n self.assertEqual(ref_state, scripted_state)\n\n def test_torchscript_consistency_transcribe(self):\n r\"\"\"Verify that scripting RNNT does not change the behavior of method `transcribe`.\"\"\"\n input, lengths = self._get_transcriber_input()\n\n rnnt = self._get_model()\n scripted = torch_script(rnnt)\n\n ref_out, ref_lengths = rnnt.transcribe(input, lengths)\n scripted_out, scripted_lengths = scripted.transcribe(input, lengths)\n\n self.assertEqual(ref_out, scripted_out)\n self.assertEqual(ref_lengths, scripted_lengths)\n\n def test_torchscript_consistency_transcribe_streaming(self):\n r\"\"\"Verify that scripting RNNT does not change the behavior of method `transcribe_streaming`.\"\"\"\n input, lengths = self._get_transcriber_streaming_input()\n\n rnnt = self._get_model()\n scripted = torch_script(rnnt)\n\n ref_state, scripted_state = None, None\n for _ in range(2):\n ref_out, ref_lengths, ref_state = rnnt.transcribe_streaming(input, lengths, ref_state)\n (\n scripted_out,\n scripted_lengths,\n scripted_state,\n ) = scripted.transcribe_streaming(input, lengths, scripted_state)\n\n self.assertEqual(ref_out, scripted_out)\n self.assertEqual(ref_lengths, scripted_lengths)\n self.assertEqual(ref_state, scripted_state)\n\n def test_torchscript_consistency_predict(self):\n r\"\"\"Verify that scripting RNNT does not change the behavior of method `predict`.\"\"\"\n input, lengths = self._get_predictor_input()\n\n rnnt = self._get_model()\n scripted = torch_script(rnnt)\n\n ref_state, scripted_state = None, None\n for _ in range(2):\n ref_out, ref_lengths, ref_state = rnnt.predict(input, lengths, ref_state)\n scripted_out, scripted_lengths, scripted_state = scripted.predict(input, lengths, scripted_state)\n self.assertEqual(ref_out, scripted_out)\n self.assertEqual(ref_lengths, scripted_lengths)\n self.assertEqual(ref_state, scripted_state)\n\n def test_torchscript_consistency_join(self):\n r\"\"\"Verify that scripting RNNT does not change the behavior of method 
`join`.\"\"\"\n (\n utterance_encodings,\n utterance_lengths,\n target_encodings,\n target_lengths,\n ) = self._get_joiner_input()\n\n rnnt = self._get_model()\n scripted = torch_script(rnnt)\n\n ref_out, ref_src_lengths, ref_tgt_lengths = rnnt.join(\n utterance_encodings, utterance_lengths, target_encodings, target_lengths\n )\n scripted_out, scripted_src_lengths, scripted_tgt_lengths = scripted.join(\n utterance_encodings, utterance_lengths, target_encodings, target_lengths\n )\n self.assertEqual(ref_out, scripted_out)\n self.assertEqual(ref_src_lengths, scripted_src_lengths)\n self.assertEqual(ref_tgt_lengths, scripted_tgt_lengths)\n\n def test_output_shape_forward(self):\n r\"\"\"Check that method `forward` produces correctly-shaped outputs.\"\"\"\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n joiner_max_input_length = input_config[\"joiner_max_input_length\"]\n max_target_length = input_config[\"max_target_length\"]\n num_symbols = input_config[\"num_symbols\"]\n\n inputs, input_lengths = self._get_transcriber_input()\n targets, target_lengths = self._get_predictor_input()\n\n rnnt = self._get_model()\n\n state = None\n for _ in range(2):\n out, out_lengths, target_lengths, state = rnnt(inputs, input_lengths, targets, target_lengths, state)\n self.assertEqual(\n (batch_size, joiner_max_input_length, max_target_length, num_symbols),\n out.shape,\n )\n self.assertEqual((batch_size,), out_lengths.shape)\n self.assertEqual((batch_size,), target_lengths.shape)\n\n def test_output_shape_transcribe(self):\n r\"\"\"Check that method `transcribe` produces correctly-shaped outputs.\"\"\"\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n max_input_length = input_config[\"max_input_length\"]\n\n input, lengths = self._get_transcriber_input()\n\n model_config = self._get_model_config()\n encoding_dim = model_config[\"encoding_dim\"]\n time_reduction_stride = model_config[\"time_reduction_stride\"]\n rnnt = self._get_model()\n\n out, out_lengths = rnnt.transcribe(input, lengths)\n self.assertEqual(\n (batch_size, max_input_length // time_reduction_stride, encoding_dim),\n out.shape,\n )\n self.assertEqual((batch_size,), out_lengths.shape)\n\n def test_output_shape_transcribe_streaming(self):\n r\"\"\"Check that method `transcribe_streaming` produces correctly-shaped outputs.\"\"\"\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n segment_length = input_config[\"segment_length\"]\n encoding_dim = input_config[\"encoding_dim\"]\n time_reduction_stride = input_config[\"time_reduction_stride\"]\n\n input, lengths = self._get_transcriber_streaming_input()\n\n rnnt = self._get_model()\n\n state = None\n for _ in range(2):\n out, out_lengths, state = rnnt.transcribe_streaming(input, lengths, state)\n self.assertEqual(\n (batch_size, segment_length // time_reduction_stride, encoding_dim),\n out.shape,\n )\n self.assertEqual((batch_size,), out_lengths.shape)\n\n def test_output_shape_predict(self):\n r\"\"\"Check that method `predict` produces correctly-shaped outputs.\"\"\"\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n max_target_length = input_config[\"max_target_length\"]\n\n model_config = self._get_model_config()\n encoding_dim = model_config[\"encoding_dim\"]\n input, lengths = self._get_predictor_input()\n\n rnnt = self._get_model()\n\n state = None\n for _ in range(2):\n out, out_lengths, state = rnnt.predict(input, lengths, state)\n 
self.assertEqual((batch_size, max_target_length, encoding_dim), out.shape)\n self.assertEqual((batch_size,), out_lengths.shape)\n\n def test_output_shape_join(self):\n r\"\"\"Check that method `join` produces correctly-shaped outputs.\"\"\"\n input_config = self._get_input_config()\n batch_size = input_config[\"batch_size\"]\n joiner_max_input_length = input_config[\"joiner_max_input_length\"]\n max_target_length = input_config[\"max_target_length\"]\n num_symbols = input_config[\"num_symbols\"]\n\n (\n utterance_encodings,\n utterance_lengths,\n target_encodings,\n target_lengths,\n ) = self._get_joiner_input()\n\n rnnt = self._get_model()\n\n out, src_lengths, tgt_lengths = rnnt.join(\n utterance_encodings, utterance_lengths, target_encodings, target_lengths\n )\n self.assertEqual(\n (batch_size, joiner_max_input_length, max_target_length, num_symbols),\n out.shape,\n )\n self.assertEqual((batch_size,), src_lengths.shape)\n self.assertEqual((batch_size,), tgt_lengths.shape)\n"
] |
[
[
"torch.random.manual_seed",
"torch.randint",
"torch.rand"
]
] |
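Every `test_torchscript_consistency_*` method above follows the same pattern: compile the module with `torch.jit.script` (wrapped as `torch_script`), feed the eager and scripted versions identical inputs, and assert the outputs agree. A minimal, self-contained sketch of that pattern, using a toy `TinyModel` as an illustrative stand-in for the RNNT model:

```python
import torch


class TinyModel(torch.nn.Module):
    """Small module standing in for the model under test."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.linear(x))


torch.random.manual_seed(31)
model = TinyModel().eval()
scripted = torch.jit.script(model)  # compile to TorchScript

x = torch.rand(3, 4)
with torch.no_grad():
    ref_out = model(x)
    scripted_out = scripted(x)

# Scripting must not change the numerics.
torch.testing.assert_close(ref_out, scripted_out)
```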
gengchenmai/space2vec
|
[
"a29793336e6a1ebdb497289c286a0b4d5a83079f",
"a29793336e6a1ebdb497289c286a0b4d5a83079f"
] |
[
"geo_prior/geo_prior/losses.py",
"geo_prior/gen_figs/plot_all_locations.py"
] |
[
"import torch\nimport utils as ut\nimport math\n\n\ndef bce_loss(pred):\n return -torch.log(pred + 1e-5)\n\n\ndef rand_samples(batch_size, params, rand_type='uniform'):\n '''\n randomly sample background locations, generate (lon, lat, date) and put into pre loc encoder\n Note that the generated (lon, lat) are between [-1, 1] for geo_net\n But for our spa_enc, they generate real (lat, lon)\n Return:\n rand_feats: shape (batch_size, input_feat_dim)\n '''\n spa_enc_type = params['spa_enc_type']\n\n # randomly sample background locations and date\n # the generated location and date from [-1, 1]\n rand_feats_orig = torch.rand(batch_size, 3).to(params['device'])*2 -1\n\n if rand_type == 'spherical':\n # theta is between (0, 2*pi), computed based on latitude\n theta = ((rand_feats_orig[:,1].unsqueeze(1)+1) / 2.0)*(2*math.pi)\n r_lon = torch.sqrt(1.0 - rand_feats_orig[:,0].unsqueeze(1)**2) * torch.cos(theta)\n r_lat = torch.sqrt(1.0 - rand_feats_orig[:,0].unsqueeze(1)**2) * torch.sin(theta)\n # rand_feats_orig: (batch_size, 3)\n rand_feats_orig = torch.cat((r_lon, r_lat, rand_feats_orig[:,2].unsqueeze(1)), 1)\n\n if spa_enc_type == \"geo_net\":\n rand_feats = ut.encode_loc_time(rand_feats_orig[:,:2], rand_feats_orig[:,2], concat_dim=1, params=params)\n \n elif spa_enc_type in ut.get_spa_enc_list():\n lon = torch.unsqueeze(rand_feats_orig[:,0] * 180, dim = 1)\n lat = torch.unsqueeze(rand_feats_orig[:,1] * 90, dim = 1)\n # rand_feats: shape (batch_size, input_feat_dim = 2)\n rand_feats = torch.cat((lon, lat), 1).to(params[\"device\"])\n else:\n raise Exception(\"spa_enc not defined!!!\")\n\n return rand_feats\n\n\ndef embedding_loss(model, params, loc_feat, loc_class, user_ids, inds):\n '''\n Args:\n model:\n param:\n loc_feat: shape (batch_size, input_feat_dim)\n loc_class: shape (batch_size)\n user_ids: shape (batch_size)\n inds: tensor, [0,1,2,...,batch_size-1]\n '''\n\n assert model.inc_bias == False\n batch_size = loc_feat.shape[0]\n\n # create random background samples\n # loc_feat_rand: (batch_size, input_feat_dim)\n loc_feat_rand = rand_samples(batch_size, params, rand_type='spherical')\n\n # get location embeddings\n # loc_cat: (2*batch_size, input_feat_dim)\n loc_cat = torch.cat((loc_feat, loc_feat_rand), 0)\n loc_emb_cat = model(loc_cat, return_feats=True)\n # the location embedding for training samples, (batch_size, num_filts)\n loc_emb = loc_emb_cat[:batch_size, :]\n # the location embedding for random selected samples, (batch_size, num_filts)\n loc_emb_rand = loc_emb_cat[batch_size:, :]\n\n # the prediction distribution for training samples, (batch_size, num_classes)\n loc_pred = torch.sigmoid(model.class_emb(loc_emb))\n # the prediction distribution for random selected samples, (batch_size, num_classes)\n loc_pred_rand = torch.sigmoid(model.class_emb(loc_emb_rand))\n\n # data loss\n # see equation 7 in paper https://arxiv.org/abs/1906.05272\n pos_weight = params['num_classes']\n # loss_pos: (batch_size, num_classes)\n loss_pos = bce_loss(1.0 - loc_pred) # neg\n # update probability at the training sample's correct class\n loss_pos[inds[:batch_size], loc_class] = pos_weight*bce_loss(loc_pred[inds[:batch_size], loc_class]) # pos\n loss_bg = bce_loss(1.0 - loc_pred_rand)\n\n if 'user' in params['train_loss']:\n\n # user location loss\n # see equation 8 in paper https://arxiv.org/abs/1906.05272\n\n # note: self.user_emb.weight shape (num_users, num_filts)\n # get the user embedding for each data sample\n # user: (batch_size, num_filts)\n user = model.user_emb.weight[user_ids, :]\n # 
p_u_given_l/p_u_given_randl: (batch_size)\n p_u_given_l = torch.sigmoid((user*loc_emb).sum(1))\n p_u_given_randl = torch.sigmoid((user*loc_emb_rand).sum(1))\n\n # user_loc_pos_loss/user_loc_neg_loss: (batch_size)\n user_loc_pos_loss = bce_loss(p_u_given_l)\n user_loc_neg_loss = bce_loss(1.0 - p_u_given_randl)\n\n # user class loss\n # see equation 9 in paper https://arxiv.org/abs/1906.05272\n # p_c_given_u: (batch_size, num_classes)\n p_c_given_u = torch.sigmoid(torch.matmul(user, model.class_emb.weight.transpose(0,1)))\n # user_class_loss: (batch_size, num_classes)\n user_class_loss = bce_loss(1.0 - p_c_given_u)\n user_class_loss[inds[:batch_size], loc_class] = pos_weight*bce_loss(p_c_given_u[inds[:batch_size], loc_class])\n\n # total loss\n loss = loss_pos.mean() + loss_bg.mean() + user_loc_pos_loss.mean() + \\\n user_loc_neg_loss.mean() + user_class_loss.mean()\n\n else:\n\n # total loss\n loss = loss_pos.mean() + loss_bg.mean()\n\n return loss\n",
"\"\"\"\nPlots all the observation locations from the training set.\n\"\"\"\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nimport sys\nsys.path.append('../')\nfrom geo_prior.paths import get_paths\nimport geo_prior.datasets as dt\n\n\nop_dir = 'images/all_locs/'\nif not os.path.isdir(op_dir):\n os.makedirs(op_dir)\n\n# load ocean mask\nmask = np.load(get_paths('mask_dir') + 'ocean_mask.npy')\nmask_lines = (np.gradient(mask)[0]**2 + np.gradient(mask)[1]**2)\nmask_lines[mask_lines > 0.0] = 1.0\n\nparams = {}\nparams['dataset'] = 'inat_2017' # inat_2018, inat_2017, birdsnap, nabirds, yfcc\nparams['meta_type'] = ''\nparams['map_range'] = (-180, 180, -90, 90)\n\n# load dataset\nop = dt.load_dataset(params, 'val', True, True)\ntrain_locs = op['train_locs']\ntrain_classes = op['train_classes']\ntrain_users = op['train_users']\ntrain_dates = op['train_dates']\nclasses = op['classes']\n#class_of_interest = op['class_of_interest']\n\n\n# plot GT locations\nplt.close('all')\nim_width = mask_lines.shape[1]\nim_height = mask_lines.shape[0]\nplt.figure(num=0, figsize=[im_width/250, im_height/250], dpi=100)\nplt.imshow(1-mask_lines, extent=params['map_range'], cmap='gray')\n\n#inds = np.where(train_classes==class_of_interest)[0]\n#print('{} instances of: '.format(len(inds)) + classes[class_of_interest])\ninds = np.arange(train_locs.shape[0])\n\n# the color of the dot indicates the date\ncolors = np.sin(np.pi*train_dates[inds])\nplt.scatter(train_locs[inds, 0], train_locs[inds, 1], c=colors, s=2, cmap='magma', vmin=0, vmax=1)\n\nplt.gca().axes.get_xaxis().set_visible(False)\nplt.gca().axes.get_yaxis().set_visible(False)\nplt.gca().set_frame_on(False)\nplt.tight_layout()\n\nop_file_name = op_dir + params['dataset'] + '_all_locs.png'\nplt.savefig(op_file_name, dpi=500, bbox_inches='tight', pad_inches=0)\n\n"
] |
[
[
"torch.sin",
"torch.cat",
"torch.unsqueeze",
"torch.log",
"torch.rand",
"torch.cos"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"numpy.gradient",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] |
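The `rand_type='spherical'` branch of `rand_samples` above uses a standard construction: if z is uniform on [-1, 1] and theta is uniform on [0, 2*pi), then (sqrt(1-z^2)*cos(theta), sqrt(1-z^2)*sin(theta), z) is uniformly distributed on the unit sphere, and the loss code keeps the first two coordinates as its background (lon, lat) in [-1, 1]. A standalone sketch of just that sampling step (synthetic data, torch only):

```python
import math

import torch

torch.manual_seed(0)
n = 10_000
z = torch.rand(n) * 2 - 1            # uniform in [-1, 1]
theta = torch.rand(n) * 2 * math.pi  # uniform in [0, 2*pi)
x = torch.sqrt(1 - z ** 2) * torch.cos(theta)
y = torch.sqrt(1 - z ** 2) * torch.sin(theta)

# Every sample lies on the unit sphere ...
assert torch.allclose(x ** 2 + y ** 2 + z ** 2, torch.ones(n), atol=1e-5)
# ... and each coordinate is roughly centered on zero.
print(x.mean().item(), y.mean().item(), z.mean().item())
```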
mmuellerstoffels/GBSTools
|
[
"aebd8aa6667a2284aaa16424f9b9d22ca3a2a375"
] |
[
"MiGRIDS/Analyzer/PerformanceAnalyzers/getGenConfigChanges.py"
] |
[
"# Project: GBS Tool\n# Author: Dr. Marc Mueller-Stoffels, [email protected], denamics GmbH\n# Date: March 5, 2018\n# License: MIT License (see LICENSE file of this package for more information)\n\nimport numpy as np\n\n\ndef getGenConfigChanges(genAllP):\n \"\"\"\n Calculates the total number of configuration changes for the diesel power house. This assumes that a 'genP' channel\n reading 0 kW means that generator is offline. If the channel reads non-zero and positive the generator is assumed to\n be online.\n Note: if negative values are present, they are deleted [set to 0]\n\n :param genAllP: [DataFrame] the real power channels for the generator fleet. Function checks if a time channel is\n included and ditches it if needed.\n :return genConfigDeltaTot: [int] total number of generator configuration changes.\n \"\"\"\n\n # Check if time stamps were passed along. If so, remove.\n if 'time' in genAllP:\n genAllP = genAllP.drop('time', 1)\n\n # Just get the booleans for generator running or not (based on non-zero power).\n genBools = np.sign(genAllP)\n\n # Should there be negative values present (reverse power!) delete those (set to 0).\n genBools[genBools < 0] = 0\n\n # Get the sum of the booleans for each time stamp\n genBoolsTot = genBools.sum(axis=1)\n\n # Get the diff of this vector\n genBoolsTotDiff = genBoolsTot.diff()\n # If diff is non-zero, this is a config change.\n genSwitched = np.abs(np.sign(genBoolsTotDiff))\n\n # Sum is the total number of configuration changes\n genConfigDeltaTot = genSwitched.sum()\n\n return genConfigDeltaTot\n\n"
] |
[
[
"numpy.sign"
]
] |
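A toy check of the sign/diff counting used in `getGenConfigChanges` above (pandas assumed, matching the function's documented DataFrame input):

```python
import numpy as np
import pandas as pd

# Two generators over six time steps: gen2 comes online at t=2,
# gen1 drops offline at t=4.
genAllP = pd.DataFrame({"gen1": [100, 100, 80, 80, 0, 0],
                        "gen2": [0, 0, 50, 50, 50, 50]})

genBools = np.sign(genAllP)          # 1 = online, 0 = offline
genBools[genBools < 0] = 0           # discard reverse-power readings
onlineTotal = genBools.sum(axis=1)   # units online per time stamp
changes = np.abs(np.sign(onlineTotal.diff())).sum()
print(changes)  # 2.0 -> one change at t=2, one at t=4
```

Because the count is driven by the *number* of online units, a swap in which one generator stops at the exact time stamp another starts would not register as a configuration change.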
killthekitten/kaggle-carvana-2017
|
[
"90795b1587af0714e5499ae72a301be38014d8b9"
] |
[
"ensemble_gpu.py"
] |
[
"import os\nimport queue\nimport threading\nfrom _ast import Lambda\n\nimport keras.backend as K\nimport numpy as np\nimport tensorflow as tf\nfrom keras.engine.topology import Input\nfrom keras.engine.training import Model\nfrom keras.layers.core import Lambda\nfrom keras.preprocessing.image import array_to_img, load_img, img_to_array\nfrom tensorflow.python.client import device_lib\n\nfrom params import args\nfrom utils import ThreadsafeIter\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\ngpus = [x.name for x in device_lib.list_local_devices() if x.name[:4] == '/gpu']\n\nn_threads = args.ensembling_cpu_threads\nensembling_dir = args.ensembling_dir\nstrategy = args.ensembling_strategy\ndirs = args.dirs_to_ensemble\nfolds_dir = args.folds_dir\ndirs = [os.path.join(folds_dir, d) for d in dirs]\nfilenames = sorted(os.listdir(dirs[0]))\nnb_samples = len(filenames)\nfor d in dirs:\n if not os.path.exists(d):\n raise ValueError(d + \" doesn't exist\")\nprediction_dir = args.pred_mask_dir\n\nbatch_size = args.pred_batch_size\n\nbatch_indices = [(start, min(start + batch_size, len(filenames))) for start in range(0, len(filenames), batch_size)]\n\nbatch_indices = ThreadsafeIter(batch_indices)\n\n\ndef data_loader(q, ):\n for bi in batch_indices:\n start, end = bi\n x_batch = []\n filenames_batch = filenames[start:end]\n\n for filename in filenames_batch:\n imgs = []\n for d in dirs:\n img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))\n imgs.append(np.squeeze(img))\n x_batch.append(np.array(imgs).transpose((1, 2, 0)))\n q.put((filenames_batch, np.array(x_batch)))\n\n for gpu in gpus:\n q.put((None, None))\n\n\ndef predictor(q, gpu, pq):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n with sess.as_default():\n model = create_model(gpu)\n while True:\n batch_fnames, x_batch = q.get()\n if x_batch is None:\n break\n\n preds = model.predict_on_batch(x_batch)\n\n for i, pred in enumerate(preds):\n filename = batch_fnames[i]\n pq.put((os.path.join(ensembling_dir, filename[:-4] + \".png\"), pred))\n\n\ndef file_writer(q, ):\n while True:\n filename, img_array = q.get()\n if filename is None:\n break\n array_to_img(img_array * 255).save(os.path.join(ensembling_dir, filename[:-4] + \".png\"))\n\n\nq_size = 100\n\n\ndef create_model(gpu):\n with tf.device(gpu):\n input = Input((1280, 1918, len(dirs)))\n x = Lambda(lambda x: K.mean(x, axis=-1, keepdims=True))(input)\n model = Model(input, x)\n model.summary()\n return model\n\n\nprint('Ensembling on {} samples with batch_size = {}...'.format(len(filenames), batch_size))\nq = queue.Queue(maxsize=1000)\nthreads = [threading.Thread(target=data_loader, name='DataLoader', args=(q,)) for t in range(n_threads//2)]\nwriting_queue = queue.Queue(maxsize=1000)\n\nfor i in range(n_threads//2):\n threads.append(threading.Thread(target=file_writer, name='DataWriter', args=(writing_queue,)))\n\n\nfor gpu in gpus:\n print(\"Starting ensembler at device \" + gpu)\n\n t = threading.Thread(target=predictor, name='Ensembler', args=(q, gpu, writing_queue))\n threads.append(t)\n\nfor t in threads:\n t.start()\n\nfor t in threads:\n t.join()\n"
] |
[
[
"tensorflow.device",
"tensorflow.python.client.device_lib.list_local_devices",
"numpy.squeeze",
"tensorflow.ConfigProto",
"tensorflow.Session",
"numpy.array"
]
] |
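`ensemble_gpu.py` above wires loader, predictor, and writer threads together through bounded queues, using `None` sentinels to signal shutdown. A stripped-down sketch of that producer/consumer layout (no GPU, Keras, or files; `worker` squares integers in place of `model.predict_on_batch`):

```python
import queue
import threading

task_q = queue.Queue(maxsize=100)    # loader -> worker
result_q = queue.Queue(maxsize=100)  # worker -> writer

def loader():
    for i in range(10):
        task_q.put(i)    # stand-in for (filenames, batch) tuples
    task_q.put(None)     # sentinel: no more batches

def worker():
    while True:
        item = task_q.get()
        if item is None:             # propagate shutdown downstream
            result_q.put(None)
            break
        result_q.put(item * item)    # stand-in for a model prediction

threads = [threading.Thread(target=loader), threading.Thread(target=worker)]
for t in threads:
    t.start()

while True:              # the main thread plays the file_writer role
    out = result_q.get()
    if out is None:
        break
    print(out)

for t in threads:
    t.join()
```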
chris-ch/coins
|
[
"41e1976f0459b0d77a9e882f6e0e10777ae6685d"
] |
[
"src/sbcireport.py"
] |
[
"import pandas\nfrom collections import defaultdict\nimport logging\n\nfrom pnl import AverageCostProfitAndLoss\n\n\ndef _select_prices(reporting_currency, prices):\n \"\"\"\n\n :param reporting_currency:\n :param prices:\n :return:\n \"\"\"\n all_prices = prices.set_index('date')\n filter_columns = [column for column in all_prices.columns if column.endswith(reporting_currency)]\n prices_selection = all_prices[filter_columns]\n prices_selection.columns = [column.split('/')[0] for column in prices_selection.columns]\n return prices_selection\n\n\ndef _include_indices(target_df, source_df):\n \"\"\"\n Adds missing indices from source_df into target_df.\n\n :param target_df:\n :param source_df:\n :return:\n \"\"\"\n complete_index = target_df.index.append(source_df.index)\n reindexed = target_df.reindex(complete_index)\n return reindexed.sort_index()\n\n\ndef compute_balances(flows):\n \"\"\"\n Balances by currency.\n :param flows:\n :return:\n \"\"\"\n flows = flows.set_index('date')\n flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)\n balances = flows_by_asset.fillna(0).cumsum()\n return balances\n\n\ndef extend_balances(reporting_currency, balances, prices):\n \"\"\"\n\n :param balances:\n :param reporting_currency:\n :param prices:\n :return:\n \"\"\"\n prices_selection = _select_prices(reporting_currency, prices)\n # removes duplicates (TODO: find bug)\n prices_selection = prices_selection[~prices_selection.index.duplicated(keep='first')]\n prices_selection = _include_indices(prices_selection, balances).ffill()\n extended_balances = _include_indices(balances, prices_selection).ffill()\n # removing duplicates\n extended_balances = extended_balances.groupby('date').first()\n return extended_balances, prices_selection\n\n\ndef compute_trades_pnl(reporting_currency, prices, trades):\n \"\"\"\n Trades P&L by asset expressed in the reporting currency.\n\n :param reporting_currency:\n :param prices:\n :param trades:\n :return: DataFrame (<index 'date'>, list of asset codes) containing pnl history for each asset\n \"\"\"\n logging.debug('loaded orders:\\n{}'.format(trades))\n if trades.empty:\n result = pandas.DataFrame({'asset': [], 'date': [], 'realized_pnl': [], 'total_pnl': [], 'unrealized_pnl': []})\n\n else:\n trades = trades.set_index('date')\n prices_selection = _select_prices(reporting_currency, prices)\n prices_selection[reporting_currency] = 1\n prices_selection = _include_indices(prices_selection, trades).ffill()\n pnl_tracker = defaultdict(AverageCostProfitAndLoss)\n pnl_data = list()\n for timestamp, price_row in prices_selection.iterrows():\n if timestamp in trades.index:\n current_trades = trades.loc[timestamp]\n for trade_ts, trade_row in current_trades.iterrows():\n fees = trade_row['fee']\n asset = trade_row['asset']\n fill_qty = float(trade_row['amount'])\n fill_price = price_row[asset]\n pnl_tracker[asset].add_fill(fill_qty, fill_price, fees)\n pnl_asset_data = {\n 'date': trade_ts,\n 'asset': asset,\n 'unrealized_pnl': pnl_tracker[asset].get_unrealized_pnl(fill_price),\n 'realized_pnl': pnl_tracker[asset].realized_pnl,\n 'total_pnl': pnl_tracker[asset].get_total_pnl(fill_price),\n }\n pnl_data.append(pnl_asset_data)\n logging.info('*trade* added pnl data: {}'.format(pnl_asset_data))\n\n else:\n for asset in pnl_tracker:\n pnl_asset_data = {\n 'date': timestamp,\n 'asset': asset,\n 'unrealized_pnl': pnl_tracker[asset].get_unrealized_pnl(price_row[asset]),\n 'realized_pnl': pnl_tracker[asset].realized_pnl,\n 'total_pnl': 
pnl_tracker[asset].get_total_pnl(price_row[asset]),\n }\n pnl_data.append(pnl_asset_data)\n logging.info('added pnl data: {}'.format(pnl_asset_data))\n\n result = pandas.DataFrame(pnl_data)\n\n result_filtered = result[['date', 'asset', 'total_pnl']]\n pnl_by_currency = result_filtered.pivot_table(index='date', columns='asset', values='total_pnl')\n return pnl_by_currency.ffill()\n\n\ndef breakdown_flows(balances_by_asset, balances):\n \"\"\"\n\n :param balances_by_asset:\n :param balances:\n :return:\n \"\"\"\n logging.info('flows:\\n{}'.format(balances_by_asset))\n flow_dates = balances_by_asset.reset_index()['date']\n timespans = pandas.DataFrame({'start': flow_dates, 'end': flow_dates.shift(-1)}, columns=['start', 'end'])\n balances_segments = list()\n for index, timespan in timespans.iterrows():\n start_date = timespan['start']\n end_date = timespan['end']\n logging.info('{} --> {}'.format(start_date, end_date))\n if end_date == pandas.NaT:\n timespan_filter = (balances['date'] >= start_date)\n\n else:\n timespan_filter = ((balances['date'] >= start_date)\n & (balances['date'] < end_date))\n\n current_balances_by_asset = balances[timespan_filter]\n balances_segments.append(current_balances_by_asset['Portfolio P&L'])\n\n return balances_segments\n\n\ndef compute_pnl(reporting_currency, flows, prices, trades):\n \"\"\"\n\n :param reporting_currency:\n :param flows:\n :param prices:\n :param trades:\n :return:\n \"\"\"\n balances_by_asset = compute_balances(flows)\n extended_balances, prices_selection = extend_balances(reporting_currency, balances_by_asset, prices)\n balances_in_reporting_currency = prices_selection * extended_balances\n balances_in_reporting_currency = balances_in_reporting_currency.fillna(0)\n balances_in_reporting_currency['Portfolio P&L'] = balances_in_reporting_currency.apply(sum, axis=1)\n balances_in_reporting_currency.reset_index(inplace=True)\n\n segments = breakdown_flows(balances_by_asset, balances_in_reporting_currency)\n # linking segments and normalizing\n previous_level = 1\n normalized = pandas.Series()\n for segment in segments:\n if not segment.empty:\n trades_pnl = compute_trades_pnl(reporting_currency, prices, trades)\n logging.info('trades pnl for segment:\\n{}'.format(trades_pnl))\n logging.info('processing segment:\\n{}'.format(segment))\n current_normalized = segment * previous_level / segment.iloc[0]\n normalized = normalized.append(current_normalized)\n logging.info('normalized segment:\\n{}'.format(current_normalized))\n previous_level = current_normalized.iloc[-1]\n\n balances_in_reporting_currency['Portfolio P&L'] = normalized\n balances_in_reporting_currency['Portfolio P&L'].ffill(inplace=True)\n balances_in_reporting_currency['Portfolio P&L'].fillna(1, inplace=True)\n pnl_history_records = balances_in_reporting_currency.sort_values('date', ascending=False)\n return pnl_history_records\n\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame"
]
] |
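`compute_balances` above turns a dated list of cash flows into running per-asset balances with a pivot followed by a cumulative sum. A tiny worked example on synthetic flows (three rows, two assets):

```python
import pandas as pd

flows = pd.DataFrame({
    "date": pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"]),
    "asset": ["BTC", "ETH", "BTC"],
    "amount": ["1.0", "10.0", "-0.4"],  # amounts arrive as strings
}).set_index("date")

by_asset = flows.pivot(columns="asset", values="amount").apply(pd.to_numeric)
balances = by_asset.fillna(0).cumsum()
print(balances)  # final row: BTC 0.6, ETH 10.0
```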
oreilly-japan/building-ml-pipelines-ja
|
[
"a5044c63610cd116612cfafb66261611b4541ebb"
] |
[
"utils/download_dataset.py"
] |
[
"\"\"\"\nDownloads the csv data\n\"\"\"\n\nimport logging\nimport os\n\nimport pandas as pd\n\n# Initial dataset source\nDATASET_URL = \"http://bit.ly/building-ml-pipelines-dataset\"\n\n# Initial local dataset location\nLOCAL_FILE_NAME = \"data/consumer_complaints_with_narrative.csv\"\n\n\ndef download_dataset(url=DATASET_URL):\n \"\"\"download_dataset downloads the remote dataset to a local path\n\n Args:\n url (str): complete url path to the csv data source (default: {DATASET_URL})\n \"\"\"\n df = pd.read_csv(url, index_col=0)\n df.to_csv(LOCAL_FILE_NAME)\n logging.info(\"Download completed.\")\n\n\ndef create_folder():\n \"\"\"Creates a data folder if it doesn't exist.\"\"\"\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")\n\n\ndef check_execution_path():\n \"\"\"Check if the function and all subsequent functions\n are executed from the root of the project\n\n Returns:\n bool: returns False if execution path isn't the root, otherwise True\n \"\"\"\n file_name = \"LICENSE\"\n if not os.path.exists(file_name):\n logging.error(\n \"Don't execute the script from a sub-directory. \"\n \"Switch to the root of the project folder\"\n )\n return False\n return True\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n logging.info(\"Started download script\")\n\n if check_execution_path():\n create_folder()\n download_dataset()\n\n logging.info(\"Finished download script\")\n"
] |
[
[
"pandas.read_csv"
]
] |
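One small design note on `create_folder` above: the explicit `os.path.exists` check has a time-of-check/time-of-use race. Since Python 3.2 the standard library can do the same thing atomically:

```python
import os

# Creates data/ if missing; a no-op (not an error) when it already exists.
os.makedirs("data/", exist_ok=True)
```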
ag-ds-bubble/panbox
|
[
"8fb9854fd3c9a931b818bd51781e6d4fb046d580"
] |
[
"panbox/pansim/pansim_active/base.py"
] |
[
"from ..pansim_view.pansim_view_handler import PanSimViewHandler\nimport pandas as pd\nimport numpy as np\nfrom ..._configs.pansim_configs import ROOT_PATH\n\nclass ActiveBase:\n\n def __init__(self):\n self.pansimData = {}\n density_data = pd.read_csv(ROOT_PATH+'/_animutils/population_density.csv', index_col=0)\n density_data['Population_Density'] = (density_data['Population_Density']/density_data['Population_Density'].max())*1000\n self.pansimData['popdensity_data'] = density_data\n self.pansimView = PanSimViewHandler()\n\n def initialise_parameters(self):\n # Canvas Based\n self.countries = self.pansimData['popdensity_data'].Country.unique().tolist()\n self.country = self.countries[0]\n\n _tempcountry = self.pansimData['popdensity_data'].Country.unique().tolist()[0]\n self.states = self.pansimData['popdensity_data'].set_index('Country').loc[_tempcountry].State.unique().tolist()\n self.state = self.states[0]\n\n _tempstate = self.states[0]\n _popdensity = self.pansimData['popdensity_data'].set_index('Country').loc[_tempcountry].set_index('State').loc[_tempstate]\n self.population_density = int(np.round(_popdensity['Population_Density']))\n self.ms_per_day = 5\n self.initially_infected_people = 10\n self.r_naught_after = 2.2\n\n # Disease Based\n self.infection_radius = 1\n self.quarentineafter = 14\n self.transmission_probab = 0.4\n self.fatality_rate = 0.02\n self.incubation_period = 28\n self.asymptomatic_percent = 0.5\n\n\n # Counter Measures\n self.socialdistancing_reulsiveforce = 0.0\n self.travelling_radius = 100\n self.intervene_after_days = 10\n\n def reset_widgets(self):\n\n self.pansimView.S2_L1_country_DD.options = self.countries\n self.pansimView.S2_L1_country_DD.value = self.countries[0]\n\n _tempcountry = self.pansimData['popdensity_data'].Country.unique().tolist()\n self.states = self.pansimData['popdensity_data'].set_index('Country').loc[self.country].State.unique().tolist()\n self.state = self.states[0]\n\n self.pansimView.S2_L1_state_DD.options = self.states\n self.pansimView.S2_L1_state_DD.value = self.state\n\n _popdensity = self.pansimData['popdensity_data'].set_index('Country').loc[self.country].set_index('State').loc[self.pansimView.S2_L1_state_DD.value]\n _popdensity = _popdensity['Population_Density']\n \n self.pansimView.S2_L1_msperday_IS.min = 1\n self.pansimView.S2_L1_msperday_IS.value = 5\n self.pansimView.S2_L1_msperday_IS.max = 30\n\n self.pansimView.S2_L1_popdensity_FS.max = 1000\n self.pansimView.S2_L1_popdensity_FS.min = 10\n self.pansimView.S2_L1_popdensity_FS.value = _popdensity\n\n\n\n self.pansimView.S2_L1_initialaffected_IS.min = 1\n self.pansimView.S2_L1_initialaffected_IS.value = 10\n self.pansimView.S2_L1_initialaffected_IS.max = 100\n\n \n\n self.pansimView.S2_L2_infectradii_FS.min = 0.1\n self.pansimView.S2_L2_infectradii_FS.value = 1\n self.pansimView.S2_L2_infectradii_FS.max = 20\n\n self.pansimView.S2_L2_transmissionprob_FS.min = 0\n self.pansimView.S2_L2_transmissionprob_FS.value = 0.4\n self.pansimView.S2_L2_transmissionprob_FS.max = 1\n \n self.pansimView.S2_L2_incubperiod_IS.min = 0\n self.pansimView.S2_L2_incubperiod_IS.value = 14\n self.pansimView.S2_L2_incubperiod_IS.max = 50\n\n self.pansimView.S2_L2_quarentineafter_IS.min = 0\n self.pansimView.S2_L2_quarentineafter_IS.value = 2\n self.pansimView.S2_L2_quarentineafter_IS.max = 100\n\n self.pansimView.S2_L2_fatalityrate_FS.min = 0.0\n self.pansimView.S2_L2_fatalityrate_FS.value = 0.02\n self.pansimView.S2_L2_fatalityrate_FS.max = 1.0\n\n self.pansimView.S2_L2_asymptrate_FS.min = 0.0\n 
self.pansimView.S2_L2_asymptrate_FS.value = 0.0\n self.pansimView.S2_L2_asymptrate_FS.max = 1.0\n\n\n\n self.pansimView.S2_L3_socialdist_FS.min = 0.0\n self.pansimView.S2_L3_socialdist_FS.value = 0.0\n self.pansimView.S2_L3_socialdist_FS.max = 1.0\n\n self.pansimView.S2_L3_travelradii_FS.min = 10\n self.pansimView.S2_L3_travelradii_FS.value = 100\n self.pansimView.S2_L3_travelradii_FS.max = 100\n\n self.pansimView.S2_L3_interventionday_BIT.min = 0\n self.pansimView.S2_L3_interventionday_BIT.value = 10\n self.pansimView.S2_L3_interventionday_BIT.max = 100\n\n self.pansimView.S2_L3_transproobafter_FS.min = 0.0\n self.pansimView.S2_L3_transproobafter_FS.value = 0.5\n self.pansimView.S2_L3_transproobafter_FS.max = 1.0\n\n def pull_curr_param_values(self, change=None):\n\n # Canvas Based\n self.selected_country = self.pansimView.S2_L1_country_DD.value\n self.selected_state = self.pansimView.S2_L1_state_DD.value\n self.population_density = int(np.round(self.pansimView.S2_L1_popdensity_FS.value))\n self.ms_per_day = self.pansimView.S2_L1_msperday_IS.value\n self.initially_infected_people = int(self.pansimView.S2_L1_initialaffected_IS.value)\n self.r_naught_after = self.pansimView.S2_L3_transproobafter_FS.value\n\n # Disease Based\n self.infection_radius = self.pansimView.S2_L2_infectradii_FS.value\n self.transmission_probab = self.pansimView.S2_L2_transmissionprob_FS.value\n self.incubation_period = self.pansimView.S2_L2_incubperiod_IS.value\n self.quarentineafter = self.pansimView.S2_L2_quarentineafter_IS.value\n self.fatality_rate = self.pansimView.S2_L2_fatalityrate_FS.value\n self.asymptomatic_percent = self.pansimView.S2_L2_asymptrate_FS.value\n\n # Counter Measures\n self.socialdistancing_reulsiveforce = self.pansimView.S2_L3_socialdist_FS.value\n self.travelling_radius = self.pansimView.S2_L3_travelradii_FS.value\n self.intervene_after_days = self.pansimView.S2_L3_interventionday_BIT.value\n\n\n"
] |
[
[
"numpy.round",
"pandas.read_csv"
]
] |
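The one data transformation in `ActiveBase.__init__` above rescales `Population_Density` so the largest value maps to 1000. A small synthetic illustration of that max-scaling:

```python
import pandas as pd

density = pd.DataFrame({"Population_Density": [12.0, 450.0, 9000.0]})
col = density["Population_Density"]
density["Population_Density"] = (col / col.max()) * 1000
print(density["Population_Density"].tolist())  # [1.333..., 50.0, 1000.0]
```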
noaa-ocs-hydrography/bathycube
|
[
"917dcac9668377b58dde6a1fdab4d875cd5804af"
] |
[
"bathycube/cube.py"
] |
[
"\"\"\"\nPython implementation of the CUBE module, Combined Uncertainty and Bathymetry Estimator. Python implementation done\nby Eric Younkin, Feb 2022.\n\nCUBE was developed as a research project within the Center of for Coastal and Ocean Mapping and NOAA/UNH Joint Hydrographic\nCenter (CCOM/JHC) at the University of New Hampshire, starting in the fall of 2000.\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom numba import types, typed\nfrom numba.experimental import jitclass\nimport json\nfrom enum import Enum\nimport logging\n\n# ex using jitclass with composition: https://stackoverflow.com/questions/38682260/how-to-nest-numba-jitclass\n\n\nclass StdErrFilter(logging.Filter):\n \"\"\"\n filter out messages that are not CRITICAL or ERROR or WARNING\n \"\"\"\n def filter(self, rec):\n return rec.levelno in (logging.CRITICAL, logging.ERROR, logging.WARNING)\n\n\nclass StdOutFilter(logging.Filter):\n \"\"\"\n filter out messages that are not DEBUG or INFO\n \"\"\"\n def filter(self, rec):\n return rec.levelno in (logging.DEBUG, logging.INFO)\n\n\ndef return_logger(logfile: str = None, loglevel=logging.INFO):\n \"\"\"\n If logfile is included, the file handler is added to the log so that the output is also driven to file.\n\n I disable the root logger by clearing out it's handlers because it always gets a default stderr log handler that\n ends up duplicating messages. Since I want the stderr messages formatted nicely, I want to setup that handler \\\n myself.\n\n Parameters\n ----------\n logfile\n path to the log file where you want the output driven to, if None, will not log to file\n loglevel\n logging level to use\n\n Returns\n -------\n logger: logging.Logger instance for the provided name/logfile\n\n \"\"\"\n \n fmat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logger = logging.getLogger('cube')\n logger.setLevel(loglevel)\n\n consolelogger = logging.StreamHandler(sys.stdout)\n consolelogger.setLevel(loglevel)\n # consolelogger.setFormatter(logging.Formatter(fmat))\n consolelogger.addFilter(StdOutFilter())\n\n errorlogger = logging.StreamHandler(sys.stderr)\n errorlogger.setLevel(logging.WARNING)\n # errorlogger.setFormatter(logging.Formatter(fmat))\n errorlogger.addFilter(StdErrFilter())\n\n logger.addHandler(consolelogger)\n logger.addHandler(errorlogger)\n\n if logfile is not None:\n filelogger = logging.FileHandler(logfile)\n filelogger.setLevel(loglevel)\n filelogger.setFormatter(logging.Formatter(fmat))\n logger.addHandler(filelogger)\n\n # eliminate the root logger handlers, it will have a default stderr pointing handler that ends up duplicating all the logs to console\n logging.getLogger().handlers = []\n\n return logger\n\n\ndef get_iho_limits(iho_order: str):\n \"\"\"\n Get fixed and variable Total Vertical Uncertainty components for the different IHO Order categories, see S-44\n Table 1 - Minimum Bathymetry Standards for Safety of Navigation Hydrographic Surveys\n\n Parameters\n ----------\n iho_order\n string representation of one of the IHO order categories, i.e. 
'special' or 'order1a'\n\n Returns\n -------\n float\n 'a' component, the fixed component of the TVU equation\n float\n 'b' component, the variable component of the TVU equation\n \"\"\"\n if iho_order == 'exclusive':\n return 0.15, 0.0075\n elif iho_order == 'special':\n return 0.25, 0.0075\n elif iho_order == 'order1a':\n return 0.5, 0.013\n elif iho_order == 'order1b':\n return 0.5, 0.013\n elif iho_order == 'order2':\n return 1.0, 0.023\n\n\nclass CubeParameters:\n def __init__(self):\n self.iho_order = 'order1a'\n self.grid_resolution_x = 0.0\n self.grid_resolution_y = 0.0\n\n self.initialization_interlock = None # system mapsheet initialization marker\n self.no_data_value = np.float32(np.nan) # Value used to indicate no data\n self.extractor = 'lhood' # method used to extract information from sheet, one of 'lhood', 'prior', 'posterior', 'predsurf', 'union'\n self.nodata_depth = 0.0 # depth to initialize estimates\n self.nodata_variance = 1000000 # variance value for initialization\n self.dist_exponent = 2.0 # exponent on distance for variance scale\n self.inv_dist_exponent = 1 / self.dist_exponent # inverse of dist exponent for efficiency\n self.dist_scale = 0.0 # normalization coefficient for distance\n self.var_scale = 0.0 # variance scale dilution factor, placeholder, will be computed on initialization\n self.iho_fixed = 0.0 # fixed portion of IHO error budget, placeholder, will be computed on initialization\n self.iho_percent = 0.0 # variable portion of IHO error budget, placeholder, will be computed on initialization\n self.median_length = 11 # Length of median pre-filter sort queue (must be odd number for algorithm)\n self.quotient_limit = 30.0 # Outlier quotient upper allowable limit, Approx. 0.1% F(1,6)\n self.discount = 1.0 # Discount factor for evolution noise variance\n self.est_offset = 4.0 # Threshold for significant offset from current estimate to warrant an intervention, Set by West & Harrison's method of significant percentage points.\n self.bayes_factor_threshold = 0.135 # Bayes factor threshold for either a single estimate, or the worst case recent sequence to warrant an intervention, Set by West & Harrison's method of significant evidence for M_1\n self.runlength_threshold = 5 # Run length threshold for worst case recent sequence to indicate a drift failure and hence to warrant an intervention, Ball-park figure following West & Harrison's method\n self.min_context = 5 # Minimum context search range for hypothesis disambiguation algorithm\n self.max_context = 10 # Maximum context search range\n self.stddev_to_conf_scale = 1.96 # Scale from Std.Dev.to CI, 95 percent CI\n # blunders = beam solutions generated by the multibeam that do not correctly represent the seafloor\n self.blunder_min = 10.0 # Minimum depth difference from pred.depth to consider a blunder\n self.blunder_percent = 0.25 # Percentage of predicted depth to be considered a blunder, if more than the minimum (0 < p < 1, typ.0.25).\n self.blunder_scalar = 3.0 # Scale on initialisation surface std. dev. at a node to allow before considering deep spikes to be blunders.\n self.capture_dist_scale = 0.05 # Scale on predicted or estimated depth for how far out to accept data. 
(unitless; typically 0.05 for hydrography but can be greater for geological mapping in flat areas with sparse data)\n\n # Controls the reported variance, one of 'cube' to use CUBE's posterior variance estimate, 'input' to track and\n # use input sample variance, and 'max' to report the greater of the two\n self.variance_selection = 'cube'\n\n def _get_iho_limits(self):\n \"\"\"\n Get fixed and variable Total Vertical Uncertainty components for the different IHO Order categories, see S-44\n Table 1 - Minimum Bathymetry Standards for Safety of Navigation Hydrographic Surveys\n\n Returns\n -------\n float\n 'a' component, the fixed component of the TVU equation\n float\n 'b' component, the variable component of the TVU equation\n \"\"\"\n return get_iho_limits(self.iho_order)\n\n def initialize(self, iho_order: str = 'order1a', grid_resolution_x: float = 1.0, grid_resolution_y: float = 1.0):\n \"\"\"\n Build the situational parameters now, those related to IHO order or grid resolution.\n\n Parameters\n ----------\n iho_order\n one of the IHO order string identifiers, i.e. 'order1a'\n grid_resolution_x\n size of the grid cell in the x/easting direction\n grid_resolution_y\n size of the grid cell in the y/northing direction\n\n Returns\n -------\n\n \"\"\"\n self.iho_order = iho_order\n self.grid_resolution_x = grid_resolution_x\n self.grid_resolution_y = grid_resolution_y\n # Compute distance scale based on node spacing\n self.dist_scale = min(grid_resolution_x, grid_resolution_y) # normalization coefficient for distance\n self.min_context = min(1, int(self.min_context / self.dist_scale))\n self.max_context = min(1, int(self.max_context / self.dist_scale))\n # Compute variance scaling factor for dilution function\n self.var_scale = self.dist_scale ** -self.dist_exponent\n # IHO Survey Order limits for determining maximum allowable error\n self.iho_fixed, self.iho_percent = self._get_iho_limits()\n\n # we square these? 
Not sure about this but it is in the C code\n self.iho_fixed = self.iho_fixed ** 2\n self.iho_percent = self.iho_percent ** 2\n\n def write_parameter_file(self, param_file: str):\n try:\n with open(param_file, 'w') as outfile:\n json.dump(self.__dict__, outfile)\n print('New CubeParameters file written to {}'.format(param_file))\n except:\n raise ValueError('CubeParameters: Unable to write new parameter file to {}'.format(param_file))\n\n def open_parameter_file(self, param_file: str):\n valid_data = False\n with open(param_file, 'r') as infile:\n try:\n data = json.load(infile)\n except:\n raise ValueError('CubeParameters: Unable to read data from {} as json'.format(param_file))\n for ky, val in data.items():\n if ky in self.__dict__:\n self.__setattr__(ky, val)\n valid_data = True\n if valid_data:\n print('CubeParameters read successfully from {}'.format(param_file))\n else:\n print('CubeParameters: Unable to find any valid data in {}'.format(param_file))\n\n\nclass Hypothesis:\n def __init__(self, initial_mean_estimate=0.0, initial_variance_estimate=0.0):\n self.current_depth = initial_mean_estimate # current depth mean estimate\n self.current_variance = initial_variance_estimate # current depth variance estimate\n self.predict_depth = initial_mean_estimate # current depth next-state mean prediction\n self.predict_variance = initial_variance_estimate # current depth next-state variance prediction\n self.cum_bayes_fac = 1.0 # cumulative bayes factor for node monitoring\n self.seq_length = 0 # worst case sequence length for monitoring\n self.hypothesis_number = 0 # index term for debugging\n self.number_of_points = 1 # number of points incorporated into this node\n self.variance_estimate = 0.0 # running estimate of variance of inputs\n\n\nclass CubeNode:\n \"\"\"\n CubeNode - The primary estimation structural element. This maintains the median pre-\n filter queue, the linked list of depth hypotheses, and the sample statistics\n for a single node.\n \"\"\"\n\n def __init__(self, depth_tolerance: float = 0.01, bayes_factor_threshold: float = 0.135, est_offset: float = 4.0,\n runlength_threshold: int = 5, discount: float = 1.0, quotient_limit: float = 30.0,\n max_hypothesis_ratio: float = 5.0, median_length: float = 11, blunder_min: float = 10.0,\n blunder_percent: float = 0.25, blunder_scalar: float = 3.0, capture_dist_scale: float = 0.05,\n var_scale: float = 0.0, dist_exponent: float = 2.0, stddev_to_conf_scale: float = 1.96,\n no_data_value: float = np.float32(np.nan), variance_selection: str = 'cube', use_queue: bool = True,\n logger: logging.Logger = logging.getLogger()):\n \"\"\"\n These default arguments are used or otherwise driven from the CubeParams class\n\n Parameters\n ----------\n depth_tolerance\n the maximum difference allowed when searching hypotheses by depth\n bayes_factor_threshold\n Bayes factor threshold for either a single estimate, or the worst case recent sequence to warrant an\n intervention, Set by West & Harrison's method of significant evidence for M_1\n est_offset\n Threshold for significant offset from current estimate to warrant an intervention, Set by West & Harrison's\n method of significant percentage points.\n runlength_threshold\n Run length threshold for worst case recent sequence to indicate a drift failure and hence to warrant an\n intervention, Ball-park figure following West & Harrison's method\n discount\n Discount factor for evolution noise variance\n quotient_limit\n Outlier quotient upper allowable limit, Approx. 0.1% F(1,6). 
From CUBE User Manual - With the released\n multiple-hypothesis version of CUBE this parameter is no longer necessary, and could, if set inappropriately\n low, eliminate valid soundings from consideration. It is now a dangerous parameter, rather than a useful one.\n Hence it should always either be set to its maximum value of 255, or else removed as a user-accessible parameter.\n max_hypothesis_ratio\n ceiling to place on hypothesis strength ratios\n median_length\n Length of median pre-filter sort queue (must be odd number for algorithm)\n blunder_min\n Minimum depth difference from pred.depth to consider a blunder\n blunder_percent\n Percentage of predicted depth to be considered a blunder, if more than the minimum (0 < p < 1, typ.0.25)\n blunder_scalar\n Scale on initialisation surface std. dev. at a node to allow before considering deep spikes to be blunders\n capture_dist_scale\n Scale on predicted or estimated depth for how far out to accept data. (unitless; typically 0.05 for hydrography\n but can be greater for geological mapping in flat areas with sparse data)\n var_scale\n variance scale dilution factor\n dist_exponent\n exponent on distance for variance scale\n stddev_to_conf_scale\n Scale from Std.Dev.to CI, 95 percent CI is the default\n no_data_value\n Value used to indicate no data\n variance_selection\n controls the reported variance, one of 'cube' to use CUBE's posterior variance estimate, 'input' to track and\n use input sample variance, and 'max' to report the greater of the two\n use_queue\n Executes the 'Reordering' step, see CUBE User Manual 3.1. With this set to False, this step is skipped.\n User Manual states that with multiple hypothesis implementation of CUBE, Reordering is no longer necessary.\n logger\n logger instance for debug messaging\n \"\"\"\n\n self.queue = []\n self.n_queued = 0\n self.hypotheses = [] # this should be a list of Hypothesis\n self.nominated = None\n self._pred_depth = 0.0\n self._pred_var = 0.0\n\n self.depth_tolerance = depth_tolerance\n self.bayes_factor_threshold = bayes_factor_threshold\n self.est_offset = est_offset\n self.runlength_threshold = runlength_threshold\n self.discount = discount\n self.quotient_limit = quotient_limit\n self.max_hypothesis_ratio = max_hypothesis_ratio\n self.median_length = median_length\n self.blunder_min = blunder_min\n self.blunder_percent = blunder_percent\n self.blunder_scalar = blunder_scalar\n self.capture_dist_scale = capture_dist_scale\n self.var_scale = var_scale\n self.dist_exponent = dist_exponent\n self.stddev_to_conf_scale = stddev_to_conf_scale\n self.no_data_value = no_data_value\n self.variance_selection = variance_selection.lower()\n self.use_queue = use_queue\n\n self.logger = logger\n\n @property\n def predicted_depth(self):\n return self._pred_depth\n\n @predicted_depth.setter\n def predicted_depth(self, new_depth: float):\n self._pred_depth = new_depth\n\n @property\n def predicted_variance(self):\n return self._pred_var\n\n @predicted_variance.setter\n def predicted_variance(self, new_variance: float):\n self._pred_var = new_variance\n\n def add_hypothesis(self, depth: float, variance: float, null_hypothesis: bool = False):\n \"\"\"\n Add a specific depth hypothesis to the current list\n\n Parameters\n ----------\n depth\n depth to set for the hypothesis\n variance\n variance to set for the hypothesis\n null_hypothesis\n if True, this is a null hypothesis, which is a specific hypothesis that has the number of points set to zero\n \"\"\"\n\n new_hypo = Hypothesis(depth, variance)\n if 
null_hypothesis:\n new_hypo.number_of_points = 0\n new_hypo.hypothesis_number = len(self.hypotheses) + 1\n self.logger.log(logging.DEBUG, f'add_hypothesis: new hypothesis number {new_hypo.hypothesis_number} for depth {depth} variance {variance}')\n self.hypotheses.append(new_hypo)\n\n def remove_hypothesis(self, depth: float):\n \"\"\"\n This removes a hypothesis from a CubeNode permanently. The hypothesis to remove is determined by the depth\n provided. The algorithm allows up to self.depth_tolerance difference between this depth and the depth in the\n hypothesis, but will only remove the hypothesis if there is a unique match to the depth. Tolerance is nominally\n a metric whisker (slightly smaller than the imperial), or 0.01m.\n\n Parameters\n ----------\n depth\n the depth of the hypothesis to remove\n \"\"\"\n\n hypo_idx = [self.hypotheses.index(h) for h in self.hypotheses if (abs(depth - h.current_depth) < self.depth_tolerance)]\n if len(hypo_idx) == 0:\n self.logger.log(logging.WARNING, 'remove_hypothesis: unable to remove hypothesis at depth {}, no hypothesis found within {} meters'.format(depth, self.depth_tolerance))\n elif len(hypo_idx) == 1:\n hypo_idx = hypo_idx[0]\n if self.nominated is not None and (self.nominated == self.hypotheses[hypo_idx]):\n self.nominated = None\n self.hypotheses.pop(hypo_idx)\n self.logger.log(logging.DEBUG, f'remove_hypothesis: hypothesis number {hypo_idx} removed')\n else:\n self.logger.log(logging.ERROR, 'remove_hypothesis: Found multiple hypothesis at depth {} +- {}, unable to remove a single hypothesis'.format(depth, self.depth_tolerance))\n raise ValueError('remove_hypothesis: Found multiple hypothesis at depth {} +- {}, unable to remove a single hypothesis'.format(depth, self.depth_tolerance))\n\n def nominate_hypothesis(self, depth: float):\n \"\"\"\n This searches the list of hypotheses for one with depth within a whisker of the specified value --- in this\n case, a metric whisker, which is the same as 0.01m. The hypothesis that matches, or the one that minimises\n the distance if there is more than one, is marked as 'nominated', and is reconstructed every time without\n running the disam. 
engine until the user explicitly resets the over-ride (with cube_node_reset_nomination) or\n more data is added to the node.\n\n Parameters\n ----------\n depth\n depth of the hypothesis that we want to preserve by nominating\n \"\"\"\n\n min_depth_distance = None\n curr_hypo = None\n for hypo in self.hypotheses:\n depth_difference = abs(depth - hypo.current_depth)\n if depth_difference < self.depth_tolerance:\n if min_depth_distance: # this is not the first hypothesis that we have found within the tolerance\n if depth_difference < min_depth_distance:\n min_depth_distance = depth_difference\n curr_hypo = hypo\n self.logger.log(logging.DEBUG, f'nominate_hypothesis: clearing previously selected hypothesis for hypothesis, selecting hypothesis at depth {depth}')\n else: # this is the first hypo found within the tolerance\n min_depth_distance = depth_difference\n curr_hypo = hypo\n self.logger.log(logging.DEBUG, f'nominate_hypothesis: selecting hypothesis at depth {depth}')\n self.nominated = curr_hypo\n if self.nominated is None:\n self.logger.log(logging.WARNING, 'nominate_hypothesis: Warning, no hypothesis found to nominate at depth {} +- {}'.format(depth, self.depth_tolerance))\n\n def clear_nomination(self):\n \"\"\"\n Remove the reference to the nominated hypothesis\n \"\"\"\n\n self.nominated = None\n self.logger.log(logging.DEBUG, 'clear_nomination: remove nominated hypothesis')\n\n def has_nomination(self):\n \"\"\"\n Return True if there is a nominated hypothesis\n\n Returns\n -------\n bool\n if there is a nominated hypothesis, return True\n \"\"\"\n\n if self.nominated is not None:\n return True\n else:\n return False\n\n def monitor_hypothesis(self, hypo_index: int, new_depth: float, new_variance: float):\n \"\"\"\n Compute West % Harrison's monitoring statistics for the node hypothesis. 
Depends on self.est_offset (the offset\n we consider to be significant), self.bayes_factor_threshold (the Bayes factor threshold before intervention) and\n self.runlength_threshold (Number of bad factors to indicate sequence failure).\n\n Parameters\n ----------\n hypo_index\n The index of the hypothesis we want to monitor\n new_depth\n new input sample which is about to be incorporated\n new_variance\n observation noise variance\n\n Returns\n -------\n bool\n False if an intervention is required\n \"\"\"\n\n try:\n hypo = self.hypotheses[hypo_index]\n except IndexError:\n self.logger.log(logging.ERROR, f'monitor_hypothesis: Unable to pull hypothesis at index {hypo_index}')\n return False\n\n forecast_variance = hypo.predict_variance + new_variance\n error = (new_depth - hypo.predict_depth) / np.sqrt(forecast_variance)\n\n # the est_offset is W&H's `h' parameter (i.e., expected normalised difference between the current forecast and\n # the observation which just indicates an outlier)\n if error >= 0:\n bayes_factor = np.exp(0.5 * (self.est_offset ** 2 - (2.0 * self.est_offset * error)))\n else:\n bayes_factor = np.exp(0.5 * (self.est_offset ** 2 + (2.0 * self.est_offset * error)))\n self.logger.log(logging.DEBUG, f'monitor_hypothesis: calculated bayes factor {bayes_factor}, error {error}, forecast variance {forecast_variance}')\n\n # check for single component failure\n # The bayes_factor_threshold is W&H's `tau' (i.e., the minimum Bayes factor which is acceptable as evidence for the current model)\n if bayes_factor < self.bayes_factor_threshold:\n self.logger.log(logging.DEBUG, f'monitor_hypothesis: bayes factor less than minimum threshold {self.bayes_factor_threshold}, potential outlier')\n return False\n # update monitors\n if hypo.cum_bayes_fac < 1.0:\n hypo.seq_length += 1\n else:\n hypo.seq_length = 1\n hypo.cum_bayes_fac = bayes_factor * min(1.0, hypo.cum_bayes_fac)\n # check for consecutive failure errors\n # The runlength_t is W&H's limit on l_t (i.e., the number of consequtively bad Bayes factors which indicate that there has been a gradual shift away from the predictor)\n if (hypo.cum_bayes_fac < self.bayes_factor_threshold) or (hypo.seq_length > self.runlength_threshold):\n self.logger.log(logging.DEBUG, f'monitor_hypothesis: cum bayes fac {hypo.cum_bayes_fac} < {self.bayes_factor_threshold} or seq length {hypo.seq_length} > {self.runlength_threshold}, potential outlier')\n return False\n self.logger.log(logging.DEBUG, 'monitor_hypothesis: no intervention required')\n return True\n\n def reset_monitor(self, hypo_index: int):\n \"\"\"\n Clear the monitoring data from the provided hypothesis\n\n Parameters\n ----------\n hypo_index\n The index of the hypothesis we want to clear the monitor data from\n \"\"\"\n\n try:\n hypo = self.hypotheses[hypo_index]\n except IndexError:\n self.logger.log(logging.ERROR, f'monitor_hypothesis: Unable to pull hypothesis at index {hypo_index}')\n return False\n hypo.cum_bayes_fac = 1.0\n hypo.seq_length = 0\n self.logger.log(logging.DEBUG, 'reset_monitor: clear the monitoring data from the provided hypothesis')\n\n def update_hypothesis(self, hypo_index: int, depth: float, variance: float):\n \"\"\"\n Update the given hypothesis (index is provided) being tracked at this node. This implements the standard\n univariate dynamic linear model update equations (West & Harrison, 'Bayesian Forecasting and Dynamic Models',\n Springer, 2ed, 1997, Ch. 2), along with the Bayes factor monitoring code (W&H, Ch. 11). 
The only failure mode\n possible with this code is if the input data would cause an intervention to be requested on the current track.\n In this case, it is the caller's responsibility to utilise the data point, since it will not be incorporated\n into the hypothesis --- typically this would mean adding a new hypothesis and pushing it onto the stack.\n\n Parameters\n ----------\n hypo_index\n The index of the hypothesis we want to update\n depth\n estimate of depth\n variance\n estimate of variance\n\n Returns\n -------\n bool\n Returns False if the estimate does not really match the track that the hypothesis represents (i.e., an\n intervention is required).\n \"\"\"\n\n hypo = self.hypotheses[hypo_index]\n # check current estimate with node monitoring\n monitoring_answer = self.monitor_hypothesis(hypo_index, depth, variance)\n if not monitoring_answer:\n self.logger.log(logging.DEBUG, 'update_hypothesis: monitoring determined an intervention is required')\n return False\n\n if self.variance_selection != 'cube':\n hypo.variance_estimate = (hypo.number_of_points - 1) * hypo.variance_estimate / hypo.number_of_points + (depth - hypo.current_depth) ** 2 / hypo.number_of_points\n # add capability to 'age' the sounding with a discount factor.\n sys_variance = hypo.current_variance * (1.0 - self.discount) / self.discount\n\n gain = hypo.predict_variance / (variance + hypo.predict_variance)\n innovation = depth - hypo.predict_depth\n hypo.predict_depth += gain * innovation\n hypo.current_depth = hypo.predict_depth\n hypo.current_variance = variance * hypo.predict_variance / (variance + hypo.predict_variance)\n hypo.predict_variance = hypo.current_variance + sys_variance\n hypo.number_of_points += 1\n self.logger.log(logging.DEBUG, f'update_hypothesis: hypothesis number {hypo_index} updated with depth {depth} and variance {variance}')\n return True\n\n def best_hypothesis_index(self, depth: float, variance: float):\n \"\"\"\n Find the closest matching hypothesis in the current hypothesis list. This computes the normalized absolute error\n between one-step forecast for each hypothesis currently being tracked and the input sample, and returns the index\n of the hypothesis with the smallest error value. If there is more than one node with the same error (unlikely\n in practice, but possible), then the last one in the list is chosen.\n\n Parameters\n ----------\n depth\n current input sample to be matched\n variance\n current input variance to be matched\n\n Returns\n -------\n int\n index to the best hypothesis\n \"\"\"\n\n best_hypo_index = None\n min_error = None\n for idx, hyp in enumerate(self.hypotheses):\n forecast_variance = hyp.predict_variance + variance\n error = abs((depth - hyp.predict_depth) / np.sqrt(forecast_variance))\n if (min_error and error < min_error) or (min_error is None):\n min_error = error\n best_hypo_index = idx\n self.logger.log(logging.DEBUG, f'best_hypothesis_index: hypothesis number {best_hypo_index} picked with minimum error {min_error}')\n return best_hypo_index\n\n def choose_hypothesis(self):\n \"\"\"\n Choose the best hypothesis for this node. In this context, `best' means `hypothesis with most points',\n rather than through any other metric. 
This may not be the `best' until all of the data is in, but it should\n give an idea of what's going on in the data structure at any point (particularly if it changes dramatically\n from sample to sample)\n\n Returns\n -------\n Hypothesis\n the hypothesis with the most points\n float\n the hypothesis strength ratio for this hypothesis (how convinced CUBE is that the hypo is good)\n\n \"\"\"\n best_hypo = None\n hypo_ratio = 0.0\n second_highest_count = 0\n current_max_pointcount = 0\n for hyp in self.hypotheses:\n if hyp.number_of_points > 0:\n if hyp.number_of_points > current_max_pointcount:\n best_hypo = hyp\n if current_max_pointcount > second_highest_count:\n second_highest_count = current_max_pointcount\n current_max_pointcount = hyp.number_of_points\n elif hyp.number_of_points > second_highest_count:\n second_highest_count = hyp.number_of_points\n if second_highest_count and current_max_pointcount:\n hypo_ratio = max(0.0, self.max_hypothesis_ratio - (current_max_pointcount / second_highest_count))\n self.logger.log(logging.DEBUG, f'choose_hypothesis: hypothesis number {best_hypo.hypothesis_number} picked as it had the most points ({current_max_pointcount}), hypothesis strength {hypo_ratio}')\n return best_hypo, hypo_ratio\n\n def update_node(self, depth: float, variance: float):\n \"\"\"\n Update the CUBE equations for this node and input. This runs the basic filter equations, using the KF formulation,\n and its innovations formulation. This algorithm now includes a discounted system noise variance model to set the\n evolution noise dynamically depending on the variance that was estimated at the previous stage (West & Harrison,\n 'Bayesian Forecasting and Dynamic Models', Springer, 2ed., 1997, ch.2) and a monitoring scheme and feed-back\n interventions to allow the code to check that the estimates are staying in touch with the input data. The\n monitoring scheme is also based on West & Harrison as above, Ch.11, Sec. 11.5.1, using cumulative Bayes factors\n and the unidirectional level shift alternate model.\n\n Parameters\n ----------\n depth\n new depth estimate to incorporate\n variance\n new variance estimate to incorporate\n\n Returns\n -------\n bool\n True if node hypothesis was updated or if this is the first update. False if a new hypothesis had to be\n created because data did not allow updating an existing one.\n \"\"\"\n # find the best matching hypothesis index for the current input sample given those currently being tracked\n best_idx = self.best_hypothesis_index(depth, variance)\n if best_idx is None:\n # didn't match one, should only happen where there are no hypothesis, so we add a new one\n self.add_hypothesis(depth, variance, null_hypothesis=False)\n else:\n # update the best hypothesis with the current data\n updated = self.update_hypothesis(best_idx, depth, variance)\n if not updated:\n # failed update - indicates an intervention, so that we need to start a new hypothesis to capture the outlier/datum shift\n self.reset_monitor(best_idx)\n self.add_hypothesis(depth, variance, null_hypothesis=False)\n self.logger.log(logging.DEBUG, f'update_node: no hypothesis updated, depth {depth} variance {variance} successfully incorporated as new hypothesis')\n return True\n\n def truncate(self):\n \"\"\"\n Identify all points that are outliers and remove them from the queue. The definition of 'outlier' depends on\n the self.quotient_limit attribute. 
In general, the higher the value, the more extreme the difference between\n mean depth and point depth must be to be considered an outlier.\n\n In theory, the distribution of the quotient values computed should be approximately a Fisher F(1,N-2) where\n there are N points in the input sequence. The values of the quotients are always positive, and monotonically\n increasing for worse outliers; therefore, one-sided critical values should be considered.\n \"\"\"\n\n if self.n_queued < 3:\n self.logger.log(logging.DEBUG, f'truncate: with {self.n_queued} queued points, truncate unnecessary')\n return\n mean = 0.0\n sum_square_diff = 0.0\n num_points = self.n_queued - 1 # number of points + 1 outlier\n for depth, variance in self.queue:\n mean += depth\n sum_square_diff += depth ** 2\n sum_square_diff -= (mean ** 2) / (num_points + 1)\n mean /= (num_points + 1)\n sum_square_diff_k = num_points * sum_square_diff / (num_points ** 2 - 1)\n\n # run the list computing quotients, gather indicies for the outliers\n outlier_index = []\n for idx in range(len(self.queue)):\n depth = self.queue[idx][0]\n diff_sq = (depth - mean) ** 2\n quot = diff_sq / (sum_square_diff_k - (diff_sq / (num_points - 1)))\n if quot >= self.quotient_limit:\n self.logger.log(logging.DEBUG, f'truncate: point {idx} flagged for removal with quotient value greater than limit {self.quotient_limit}')\n outlier_index.append(idx)\n outlier_index = outlier_index[::-1]\n # now remove the marked entities from the queue\n for idx in outlier_index:\n self.queue.pop(idx)\n self.n_queued -= 1\n self.logger.log(logging.DEBUG, f'truncate: removed {len(outlier_index)} points from the queue')\n\n def flush_queue(self):\n \"\"\"\n This flushes the queue into the input sequence in order (i.e., take current median, resort, repeat). Since the\n queue is always sorted, we can just walk the list in order, rather than having to re-sort or shift data, etc.\n When we have an even number of points, we take the shallowest of the points first; this means that we walk the\n list alternately to the left and right, starting to the right if the initial number of points is even, and to\n the left if the number of points is odd. 
To avoid shifting the data, we just increase the step after every\n extraction, until we step off the LHS of the array.\n\n For a list of 10 points, the order ends up looking something like this: [4, 5, 3, 6, 2, 7, 1, 8, 0, 9]\n \"\"\"\n\n if self.n_queued == 0:\n self.logger.log(logging.DEBUG, f'flush_queue: no queued points to flush')\n return\n scale = 1\n self.truncate()\n self.logger.log(logging.DEBUG, f'flush_queue: flushing {self.n_queued} points')\n if self.n_queued % 2 == 0: # even\n ex_pt = int(self.n_queued / 2 - 1)\n direction = 1\n else: # odd\n ex_pt = int(self.n_queued / 2)\n direction = -1\n while ex_pt >= 0:\n self.update_node(self.queue[ex_pt][0], self.queue[ex_pt][1])\n ex_pt += direction * scale\n direction = -direction\n scale += 1\n self.queue = []\n self.n_queued = 0\n\n def queue_fill(self, depth: float, variance: float):\n \"\"\"\n Insert a new point into the queue, maintain depth sorted order, with greater depths last\n\n Parameters\n ----------\n depth\n new depth value to add to the queue\n variance\n new variance value to add to the queue\n \"\"\"\n\n if not self.use_queue:\n self.logger.log(logging.WARNING, f'queue_fill: skipping as use_queue is False')\n return\n if not self.queue:\n self.logger.log(logging.DEBUG, f'queue_fill: queue is empty, adding depth {depth}, variance {variance} to queue')\n self.queue.append([depth, variance])\n else:\n insertion_index = None\n for i in range(len(self.queue)):\n q_dpth = self.queue[i][0]\n if depth > q_dpth:\n insertion_index = i + 1\n else:\n break\n if insertion_index is not None:\n self.logger.log(logging.DEBUG, f'queue_fill: inserted at index {insertion_index}, adding depth {depth}, variance {variance} to queue')\n self.queue.insert(insertion_index, [depth, variance])\n else:\n self.logger.log(logging.DEBUG, f'queue_fill: adding to end of queue, adding depth {depth}, variance {variance} to queue')\n self.queue.insert(0, [depth, variance])\n self.n_queued += 1\n\n def queue_insert(self, depth: float, variance: float):\n \"\"\"\n Insert a point in the already filled queue. 
We then return the median point and insert this new point, ensuring\n that the queue remains sorted, with greater depths last.\n\n Parameters\n ----------\n depth\n new depth value to add to the queue\n variance\n new variance value to add to the queue\n\n Returns\n -------\n float\n median depth\n float\n median variance\n \"\"\"\n\n if not self.use_queue:\n self.logger.log(logging.WARNING, f'queue_insert: skipping as use_queue is False')\n return depth, variance\n # 11 / 2 = 5.5, floor(5.5) = 5, 5 being the median index of an array of 11 points\n median_index = int(np.floor(self.median_length / 2))\n mdepth, mvariance = self.queue.pop(median_index)\n insertion_index = None\n if depth > mdepth:\n check_range = range(median_index, len(self.queue))\n else:\n check_range = range(len(self.queue))\n\n for i in check_range:\n q_dpth = self.queue[i][0]\n if depth < q_dpth:\n insertion_index = i\n break\n\n if insertion_index is not None:\n self.logger.log(logging.DEBUG, f'queue_insert: queue full, inserted at index {insertion_index}, adding depth {depth}, variance {variance} to queue')\n self.queue.insert(insertion_index, [depth, variance])\n else:\n self.logger.log(logging.DEBUG, f'queue_insert: queue full, adding to end of queue, adding depth {depth}, variance {variance} to queue')\n self.queue.append([depth, variance])\n\n # compute the likely 99% confidence bound below the shallowest point and above the deepest point in the\n # buffer, and check that they do actually overlap somewhere in the middle. Otherwise, with less than 1% chance\n # of error, we are suspicious that there are outliers in the buffer somewhere and we should attempt a round of\n # outlier rejection. Assuming that the errors are approximately normal, 0.5% in either tail is achieved at\n # 2.5758 std dev from the mean.\n\n # 2.56 approximates the 2.5758 z-score for the two-sided 99 percent confidence interval of a normal distribution\n first_pt = self.queue[0]\n last_pt = self.queue[-1]\n low_water = last_pt[0] - 2.56 * np.sqrt(last_pt[1])\n high_water = first_pt[0] + 2.56 * np.sqrt(first_pt[1])\n if low_water >= high_water: # confidence limits do not overlap\n self.truncate() # remove any outliers\n self.logger.log(logging.DEBUG, f'queue_insert: queue full, returning median point depth {mdepth}, variance {mvariance}')\n return mdepth, mvariance\n\n def add_to_queue(self, depth: float, variance: float):\n \"\"\"\n Insert points into the queue of estimates and insert point into the filter sequence if the queue is filled.\n\n This inserts the depth given into the queue associated with the specified node, creating the queue if required.\n After the queue has been primed (i.e., filled with estimates), on each call this routine extracts the median\n value from the queue and then inserts it into the CUBE input sequence. 
Note that this algorithm means that the\n queue will always be full, and hence must be flushed before extracting any depth estimates (this can also be\n done to save memory).\n\n if use_queue is set to False, skips the queue entirely and runs update_node with the provided depth/variance\n\n Parameters\n ----------\n depth\n new depth value to add to the queue\n variance\n new variance value to add to the queue\n \"\"\"\n\n if not self.queue:\n self.n_queued = 0\n if self.use_queue:\n self.logger.log(logging.DEBUG, f'add_to_queue: adding depth {depth} variance {variance} to the queue')\n if self.n_queued < self.median_length:\n self.queue_fill(depth, variance)\n else:\n median_depth, median_variance = self.queue_insert(depth, variance)\n self.update_node(median_depth, median_variance)\n else:\n self.update_node(depth, variance)\n\n def add_point_to_node(self, depth: float, vertical_uncertainty: float, horizontal_uncertainty: float, distance_to_node: float):\n \"\"\"\n Insert a point into the node. This will compute the variance scale factor for the new data, and send the data\n into the estimation queue.\n\n Parameters\n ----------\n depth\n new depth value to add to the queue\n vertical_uncertainty\n new vertical uncertainty value associated with the point, assumes 2 sigma\n horizontal_uncertainty\n new horizontal uncertainty value associated with the point, assumes 2 sigma\n distance_to_node\n distance from point to node\n \"\"\"\n\n conf_95_percent = 1.96\n if np.isnan(self.predicted_depth):\n self.logger.log(logging.DEBUG, f'add_point_to_node: Sounding rejected with predicted depth of NaN, sounding depth = {depth}')\n return\n # euclidean distance in projected space, i.e. distance sounding is being propagated from touchdown boresight\n # to node estimation point\n dist = np.sqrt(distance_to_node)\n if self.predicted_depth:\n target_depth = self.predicted_depth\n # do the test for blunders here, since it makes no sense to test when there is no predicted depth\n # blunders = beam solutions generated by the multibeam that do not correctly represent the seafloor\n blunder_limit = min(target_depth - self.blunder_min, target_depth - self.blunder_percent * abs(target_depth))\n blunder_limit = min(blunder_limit, target_depth - self.blunder_scalar * np.sqrt(self.predicted_variance))\n if depth < blunder_limit:\n self.logger.log(logging.DEBUG, f'add_point_to_node: Sounding rejected, {depth} less than blunder limit {blunder_limit}')\n return\n else:\n self.logger.log(logging.DEBUG, f'add_point_to_node: Blunder limit test pass, no predicted depth for this node')\n target_depth = depth\n calculated_captdist = self.capture_dist_scale * abs(target_depth)\n if dist > max(calculated_captdist, 0.5):\n self.logger.log(logging.DEBUG, f'add_point_to_node: sounding rejected, {dist} greater than max(0.5 or calculated capture distance {calculated_captdist})')\n return\n self.logger.log(logging.DEBUG, f'add_point_to_node: sounding accepted at node, distance {dist}m, target depth {max(calculated_captdist, 0.5)}m')\n # add horizontal positioning uncertainty, assumes 2sigma\n dist += conf_95_percent * np.sqrt(horizontal_uncertainty)\n # TODO this asked for range (range != 0) in the original source, don't have range\n sounding_range = 0.0\n if sounding_range != 0.0 and (not np.isnan(self.predicted_depth) and self.predicted_depth):\n offset = self.predicted_depth - depth\n self.logger.log(logging.DEBUG, f'add_point_to_node: adding offset to depth')\n else:\n offset = 0.0\n variance = vertical_uncertainty * (1.0 + 
self.var_scale * (dist ** self.dist_exponent))\n self.add_to_queue(depth + offset, variance)\n self.nominated = None\n\n def _return_nominated_answer(self, value: tuple = ('depth', 'uncertainty')):\n \"\"\"\n Return the data for the provided value identifiers for the nominated hypothesis\n\n Parameters\n ----------\n value\n list of the values you want to extract from this node, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n\n Returns\n -------\n list\n list of floats for each value identifier provided\n \"\"\"\n\n data = []\n for value_id in value:\n if value_id == 'depth':\n data.append(self.nominated.current_depth)\n elif value_id == 'uncertainty':\n data.append(self.stddev_to_conf_scale * np.sqrt(self.nominated.current_variance))\n elif value_id == 'ratio':\n data.append(0.0) # having a nominated hypothesis means there is no ratio, only one hypothesis\n elif value_id == 'n_hypotheses':\n data.append(self.return_number_of_hypotheses())\n self.logger.log(logging.DEBUG, f'_return_nominated_answer: good hypothesis, returning {data} for {value}')\n return data\n\n def _return_answer_from_hypothesis(self, hyp: Hypothesis, ratio: float, value: tuple = ('depth', 'uncertainty')):\n \"\"\"\n Provide the answer for the given value identifiers for the given hypothesis.\n\n Parameters\n ----------\n hyp\n selected hypothesis to get an answer from\n ratio\n hypothesis strength ratio for this hypothesis\n value\n list of the values you want to extract from this node, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n\n Returns\n -------\n list\n list of floats for each value identifier provided\n \"\"\"\n\n data = []\n if hyp.number_of_points > 0:\n # Only reconstruct if some data was involved in the construction of the hypothesis. This excludes\n # initial hypotheses from an initialisation surface, which are set up with n_j = 0.\n for value_id in value:\n if value_id == 'depth':\n data.append(hyp.current_depth)\n elif value_id == 'uncertainty':\n if self.variance_selection == 'max':\n data.append(self.stddev_to_conf_scale * np.sqrt(max(hyp.current_variance, hyp.variance_estimate)))\n elif self.variance_selection == 'input':\n data.append(self.stddev_to_conf_scale * np.sqrt(hyp.variance_estimate))\n else:\n data.append(self.stddev_to_conf_scale * np.sqrt(hyp.current_variance))\n elif value_id == 'ratio':\n data.append(ratio) # only one hypothesis\n elif value_id == 'n_hypotheses':\n data.append(self.return_number_of_hypotheses())\n self.logger.log(logging.DEBUG, f'_return_answer_from_hypothesis: good hypothesis, returning {data} for {value}')\n else:\n self.logger.log(logging.DEBUG, f'_return_answer_from_hypothesis: hypothesis empty, returning nodatavalues')\n for _ in value:\n data.append(self.no_data_value)\n return data\n\n def extract_node_value(self, value: tuple = ('depth', 'uncertainty')):\n \"\"\"\n Extract a node value for each of the provided value identifiers. These values come from either the nominated\n hypothesis or a selected hypothesis that has the most points of all hypotheses. 
If there is no hypothesis,\n you will get a self.no_data_value for each requested value identifier.\n\n Parameters\n ----------\n value\n list of the values you want to extract from this node, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n\n Returns\n -------\n list\n list of the values for each value identifier provided\n \"\"\"\n\n self.logger.log(logging.DEBUG, f'extract_node_value: getting hypothesis answer for {value}')\n if self.nominated is not None:\n self.logger.log(logging.DEBUG, f'extract_node_value: using nominated hypothesis')\n return self._return_nominated_answer(value)\n if not self.hypotheses:\n self.logger.log(logging.DEBUG, f'extract_node_value: no hypothesis found')\n data = []\n for _ in value:\n data.append(self.no_data_value)\n elif len(self.hypotheses) == 1: # Special case: only one depth hypothesis (the usual case, we hope ...)\n self.logger.log(logging.DEBUG, f'extract_node_value: only one hypothesis!')\n hyp = self.hypotheses[0]\n data = self._return_answer_from_hypothesis(hyp, 0.0, value) # ratio of 0 for only having one hypothesis\n else:\n self.logger.log(logging.DEBUG, f'extract_node_value: multiple hypotheses found')\n hyp, ratio = self.choose_hypothesis()\n data = self._return_answer_from_hypothesis(hyp, ratio, value)\n return data\n\n def extract_closest_node_value(self, depth: float, variance: float, value: tuple = ('depth', 'uncertainty')):\n \"\"\"\n Extract the node values for the hypothesis which is closest in depth to the supplied depth/variance point values, in a\n minimum error sense. If there are no depth hypotheses in this node, self.no_data_value is returned.\n\n Parameters\n ----------\n depth\n depth value to use in the query\n variance\n variance value to use in the query\n value\n list of the values you want to extract from this node, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n\n Returns\n -------\n list\n list of the values for each value identifier provided\n \"\"\"\n\n if self.nominated is not None:\n self.logger.log(logging.DEBUG, f'extract_closest_node_value: using nominated hypothesis')\n return self._return_nominated_answer(value)\n\n if not self.hypotheses or len(self.hypotheses) == 1:\n self.logger.log(logging.DEBUG, f'extract_closest_node_value: one hypothesis or less, falling back on basic extraction')\n # with no hypotheses or just one hypothesis, this is just doing the basic extraction\n return self.extract_node_value(value)\n min_error = None\n nearest_hypo = None\n total_points = 0\n for hyp in self.hypotheses:\n if hyp.number_of_points > 0: # check that some data were used in making the hypothesis before accepting it as valid\n error = abs((hyp.current_depth - depth) / np.sqrt(variance))\n if min_error is None or error < min_error:\n min_error = error\n nearest_hypo = hyp\n total_points += hyp.number_of_points\n if nearest_hypo is None: # should never get to this point\n self.logger.log(logging.WARNING, f'extract_closest_node_value: no hypothesis found!')\n data = []\n for _ in value:\n data.append(self.no_data_value)\n else:\n ratio = max(0.0, self.max_hypothesis_ratio - (nearest_hypo.number_of_points / (total_points - nearest_hypo.number_of_points)))\n data = self._return_answer_from_hypothesis(nearest_hypo, ratio, value)\n self.logger.log(logging.DEBUG, f'extract_closest_node_value: found {data} for {value}')\n return data\n\n def extract_posterior_weighted_node_value(self, depth: float, variance: float, value: tuple = ('depth', 'uncertainty')):\n \"\"\"\n Extract a posterior weighted best depth 
hypothesis using the provided depth/variance values as a guide. Returns\n the hypothesis value for each provided value identifier in the list.\n\n Parameters\n ----------\n depth\n depth value to use in the query\n variance\n variance value to use in the query\n value\n list of the values you want to extract from this node, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n\n Returns\n -------\n list\n list of the values for each value identifier provided\n \"\"\"\n\n if self.nominated is not None:\n self.logger.log(logging.DEBUG, f'extract_posterior_weighted_node_value: using nominated hypothesis')\n return self._return_nominated_answer(value)\n if not self.hypotheses or len(self.hypotheses) == 1:\n self.logger.log(logging.DEBUG, f'extract_posterior_weighted_node_value: one hypothesis or less, falling back on basic extraction')\n # with no hypotheses or just one hypothesis, this is just doing the basic extraction\n return self.extract_node_value(value)\n max_posterior = None\n nearest_hypo = None\n total_points = 0\n for hyp in self.hypotheses:\n if hyp.number_of_points > 0: # check that some data were used in making the hypothesis before accepting it as valid\n mean = hyp.current_depth\n posterior = -(depth - mean) ** 2 / (2.0 * variance) + np.log(hyp.number_of_points)\n if max_posterior is None or posterior > max_posterior:\n max_posterior = posterior\n nearest_hypo = hyp\n total_points += hyp.number_of_points\n if nearest_hypo is None: # should never get to this point\n self.logger.log(logging.WARNING, f'extract_posterior_weighted_node_value: no hypothesis found!')\n data = []\n for _ in value:\n data.append(self.no_data_value)\n else:\n ratio = max(0.0, self.max_hypothesis_ratio - (nearest_hypo.number_of_points / (total_points - nearest_hypo.number_of_points)))\n data = self._return_answer_from_hypothesis(nearest_hypo, ratio, value)\n self.logger.log(logging.DEBUG, f'extract_posterior_weighted_node_value: found {data} for {value}')\n return data\n\n def return_depth(self):\n \"\"\"\n Return the depth for the 'best' hypothesis in this node. In this case, the 'best' hypothesis is the hypothesis\n with the most points. If there is no hypothesis, this returns self.no_data_value\n\n Returns\n -------\n float\n depth value for the best hypothesis\n \"\"\"\n\n return self.extract_node_value(('depth', ))[0]\n\n def return_uncertainty(self):\n \"\"\"\n Return the uncertainty for the 'best' hypothesis in this node. In this case, the 'best' hypothesis is the hypothesis\n with the most points. 
If there is no hypothesis, this returns self.no_data_value\n\n Returns\n -------\n float\n uncertainty value for the best hypothesis\n \"\"\"\n\n return self.extract_node_value(('uncertainty', ))[0]\n\n def return_number_of_hypotheses(self):\n \"\"\"\n Return the total number of hypotheses in this node.\n\n Returns\n -------\n int\n the total number of hypotheses in the node\n \"\"\"\n\n return len(self.hypotheses)\n\n def dump_hypotheses(self):\n \"\"\"\n Print the status of each hypothesis\n \"\"\"\n\n for hyp in self.hypotheses:\n print('Hypothesis {} - depth={}, variance={}, number_of_points={}'.format(hyp.hypothesis_number, hyp.current_depth,\n hyp.current_variance, hyp.number_of_points))\n\n\nclass CubeGrid:\n def __init__(self, minimum_easting: float, maximum_northing: float, num_columns: int, num_rows: int,\n resolution_x: float, resolution_y: float, param: CubeParameters, use_queue: bool = True,\n logfile: str = None, debug: bool = False):\n \"\"\"\n Main structure for Cube, holds CubeNodes in a grid with metadata.\n\n Parameters\n ----------\n minimum_easting\n minimum easting extent of the grid\n maximum_northing\n maximum northing extent of the grid\n num_columns\n number of columns in the grid\n num_rows\n number of rows in the grid\n resolution_x\n the resolution in the x direction of the grid (width of columns)\n resolution_y\n the resolution in the y direction of the grid (height of rows)\n param\n CubeParameters object with the default settings for the grid\n use_queue\n Executes the 'Reordering' step, see CUBE User Manual 3.1. With this set to False, this step is skipped.\n User Manual states that with multiple hypothesis implementation of CUBE, Reordering is no longer necessary.\n logfile\n optional path to a logfile\n debug\n if True, will print debug messages to the logger\n \"\"\"\n\n self.param = param\n self.no_data_value = param.no_data_value\n self.dist_scale = param.dist_scale\n self.inv_dist_exponent = param.inv_dist_exponent\n self.min_context = param.min_context\n self.max_context = param.max_context\n self.iho_order = param.iho_order\n self.iho_fixed, self.iho_percent = get_iho_limits(param.iho_order)\n self.use_queue = use_queue\n\n self.minimum_easting = minimum_easting\n self.maximum_northing = maximum_northing\n\n self.num_columns = num_columns\n self.num_rows = num_rows\n self.resolution_x = resolution_x\n self.resolution_y = resolution_y\n\n self.logfile = logfile\n\n loglvl = logging.INFO\n if debug:\n loglvl = logging.DEBUG\n\n if self.logfile:\n self.logger = return_logger(self.logfile, loglevel=loglvl)\n else:\n self.logger = return_logger(loglevel=loglvl)\n self.debug = debug\n\n self.grid = []\n for row in range(self.num_rows):\n rdata = []\n for column in range(self.num_columns):\n rdata.append(CubeNode(bayes_factor_threshold=param.bayes_factor_threshold, est_offset=param.est_offset,\n runlength_threshold=param.runlength_threshold, discount=param.discount, quotient_limit=param.quotient_limit,\n median_length=param.median_length, blunder_min=param.blunder_min, blunder_percent=param.blunder_percent,\n blunder_scalar=param.blunder_scalar, capture_dist_scale=param.capture_dist_scale,\n var_scale=param.var_scale, dist_exponent=param.dist_exponent, stddev_to_conf_scale=param.stddev_to_conf_scale,\n no_data_value=param.no_data_value, variance_selection=param.variance_selection, use_queue=use_queue, logger=self.logger))\n self.grid.append(rdata)\n\n # def __repr__(self):\n # print(f'CubeGrid: (rows={self.num_rows} x columns={self.num_columns})')\n # 
print('**************************************')\n # print(f'Contains {self.total_nodes_count} total nodes, {self.empty_nodes_count} empty and {self.populated_nodes_count} populated')\n\n def _validate_insert_points(self, depth: np.ndarray, horizontal_uncertainty: np.ndarray, vertical_uncertainty: np.ndarray,\n easting: np.ndarray, northing: np.ndarray):\n if isinstance(depth, (int, float)):\n depth = np.array([depth])\n elif isinstance(depth, list):\n depth = np.array(depth)\n\n if isinstance(horizontal_uncertainty, (int, float)):\n horizontal_uncertainty = np.array([horizontal_uncertainty])\n elif isinstance(horizontal_uncertainty, list):\n horizontal_uncertainty = np.array(horizontal_uncertainty)\n\n if isinstance(vertical_uncertainty, (int, float)):\n vertical_uncertainty = np.array([vertical_uncertainty])\n elif isinstance(vertical_uncertainty, list):\n vertical_uncertainty = np.array(vertical_uncertainty)\n\n if isinstance(easting, (int, float)):\n easting = np.array([easting])\n elif isinstance(easting, list):\n easting = np.array(easting)\n\n if isinstance(northing, (int, float)):\n northing = np.array([northing])\n elif isinstance(northing, list):\n northing = np.array(northing)\n\n assert depth.size == horizontal_uncertainty.size == vertical_uncertainty.size == easting.size == northing.size\n return depth, horizontal_uncertainty, vertical_uncertainty, easting, northing\n\n @property\n def populated_nodes_count(self):\n cnt = 0\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n node = self.grid[row][col]\n if node.hypotheses or node.n_queued:\n cnt += 1\n return cnt\n\n @property\n def empty_nodes_count(self):\n cnt = 0\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n node = self.grid[row][col]\n if not node.hypotheses and not node.n_queued:\n cnt += 1\n return cnt\n\n @property\n def total_nodes_count(self):\n return self.num_rows * self.num_columns\n\n def insert_points(self, depth: np.ndarray, horizontal_uncertainty: np.ndarray, vertical_uncertainty: np.ndarray,\n easting: np.ndarray, northing: np.ndarray):\n \"\"\"\n Add an array of point values to the grid\n\n Parameters\n ----------\n depth\n new depth values to add to the queue\n vertical_uncertainty\n new vertical uncertainty values associated with the points, assumes 2 sigma\n horizontal_uncertainty\n new horizontal uncertainty values associated with the points, assumes 2 sigma\n easting\n new easting values associated with the points\n northing\n new northing values associated with the points\n \"\"\"\n\n depth, horizontal_uncertainty, vertical_uncertainty, easting, northing = self._validate_insert_points(depth, horizontal_uncertainty, vertical_uncertainty, easting, northing)\n conf_95_percent = 1.96\n conf_99_percent = 2.95\n self.logger.log(logging.DEBUG, f'insert_points: Adding {len(depth)} points...')\n for i in range(len(depth)):\n self.logger.log(logging.DEBUG, f'insert_points: x:{easting[i]}, y:{northing[i]}, z:{depth[i]}, thu:{horizontal_uncertainty[i]}, tvu:{vertical_uncertainty[i]}')\n # Determine IHO S-44 derived limits on maximum variance\n max_variance_allowed = (self.iho_fixed + self.iho_percent * depth[i] ** 2) / conf_95_percent ** 2\n ratio = max_variance_allowed / vertical_uncertainty[i]\n if ratio <= 2.0:\n ratio = 2.0\n max_radius = conf_99_percent * np.sqrt(horizontal_uncertainty[i])\n radius = self.dist_scale * (ratio - 1.0) ** self.inv_dist_exponent - max_radius\n if radius < 0.0:\n radius = self.dist_scale\n elif radius > max_radius:\n radius = 
max_radius\n if radius < self.dist_scale:\n radius = self.dist_scale\n self.logger.log(logging.DEBUG, f'insert_points: dist_scale:{self.dist_scale}, ratio:{ratio}, max radius:{max_radius}, max_variance:{max_variance_allowed}')\n # determine the coordinates of the effect square. This is designed to compute the largest region the sounding\n # can effect, and hence to make the insertion more efficient by only offering the sounding where it is likely\n # to be used\n min_x = int(((easting[i] - radius) - self.minimum_easting) / self.resolution_x)\n max_x = int(((easting[i] + radius) - self.minimum_easting) / self.resolution_x)\n min_y = int((self.maximum_northing - (northing[i] + radius)) / self.resolution_y)\n max_y = int((self.maximum_northing - (northing[i] - radius)) / self.resolution_y)\n # check that the sounding hits somewhere in the grid\n if max_x < 0 or min_x >= (self.num_columns - 1) or max_y < 0 or min_y >= (self.num_rows - 1):\n self.logger.log(logging.DEBUG, f'insert_points: Sounding out of bounds, ({min_x},{min_y}) ({max_x},{max_y})')\n continue # out of bounds\n # clip to the interior of the current grid\n min_x = max(0, min_x)\n max_x = min(max_x, self.num_columns - 1)\n min_y = max(0, min_y)\n max_y = min(max_y, self.num_rows - 1)\n self.logger.log(logging.DEBUG, f'insert_points: clipped row,column limits to use in search, ({min_x},{min_y}) ({max_x},{max_y})')\n for y in range(min_y, max_y + 1):\n for x in range(min_x, max_x + 1):\n node_x = self.minimum_easting + (x * self.resolution_x) + (self.resolution_x / 2)\n node_y = self.maximum_northing - (y * self.resolution_y) - (self.resolution_y / 2)\n distance_sq = (node_x - easting[i]) ** 2 + (node_y - northing[i]) ** 2\n if distance_sq >= radius ** 2:\n self.logger.log(logging.DEBUG, f'insert_points: rejecting point as out of distance to node at row/col, ({y}, {x})')\n continue # distance to great, not including this point in this node\n self.logger.log(logging.DEBUG, f'insert_points: adding point to node at row/col, ({y}, {x})')\n self.grid[y][x].add_point_to_node(depth[i], vertical_uncertainty[i], horizontal_uncertainty[i], distance_sq)\n\n def flush_node_queues(self):\n \"\"\"\n Flush the queues for each node\n \"\"\"\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n node = self.grid[row][col]\n node.flush_queue()\n\n def get_grid_values(self, value: tuple = ('depth', 'uncertainty'), method: str = 'local'):\n \"\"\"\n Get the values for each node in the grid for each value identifier in the value list.\n\n Parameters\n ----------\n value\n list of the values you want to extract from this grid, one of 'depth', 'uncertainty', 'ratio', 'n_hypotheses'\n method\n method to use in determining the appropriate hypothesis value. 'local' to use the local spatial\n context to find the closest node with a single hypothesis and use that hypothesis depth to find the nearest\n hypothesis in terms of depth in the current node. 'prior' to use the hypothesis with the most points\n associated with it. 'posterior' to combine both prior and local methods to form an approximate Bayesian\n posterior distribution. 
'predict' to get the hypothesis closest to the predicted depth associated with\n each node.\n\n Returns\n -------\n list\n list of numpy arrays for the grid, one for each variable identifier in the provided value list\n \"\"\"\n data = []\n self.logger.log(logging.DEBUG, f'get_grid_values: getting grid values for {value} using method {method}')\n for _ in value:\n vgrid = np.full((self.num_rows, self.num_columns), self.no_data_value)\n data.append(vgrid)\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n node = self.grid[row][col]\n node_data = data\n if method in ['local', 'posterior']:\n node_hcount = node.return_number_of_hypotheses()\n if node_hcount <= 1:\n node_data = node.extract_node_value(value)\n else:\n closest_node = None\n for offset in range(self.min_context, self.max_context + 1):\n target_rows = [row - offset, row + offset]\n for target_row in target_rows:\n if 0 <= target_row < self.num_rows:\n for col_offset in range(-offset, offset + 1):\n target_col = col + col_offset\n if target_col < 0 or target_col >= self.num_columns:\n continue\n chk_node_hcount = self.grid[target_row][target_col].return_number_of_hypotheses()\n if chk_node_hcount == 1:\n closest_node = self.grid[target_row][target_col]\n self.logger.log(logging.DEBUG, f'get_grid_values: found closest node during row search at ({target_row},{target_col})')\n break\n if closest_node is not None:\n break\n if closest_node is None:\n target_cols = [col - offset, col + offset]\n for target_col in target_cols:\n if 0 <= target_col < self.num_columns:\n for row_offset in range(-offset + 1, offset):\n target_row = row + row_offset\n if target_row < 0 or target_row >= self.num_rows:\n continue\n chk_node_hcount = self.grid[target_row][target_col].return_number_of_hypotheses()\n if chk_node_hcount == 1:\n closest_node = self.grid[target_row][target_col]\n self.logger.log(logging.DEBUG, f'get_grid_values: found closest node during column search at ({target_row},{target_col})')\n break\n if closest_node is not None:\n break\n if closest_node is not None:\n break\n if closest_node is None: # default to the basic node hypothesis selection, couldn't find a good hypothesis in the region\n self.logger.log(logging.DEBUG, f\"get_grid_values: default to the basic node hypothesis selection, couldn't find a good hypothesis in the region\")\n node_data = node.extract_node_value(value)\n else:\n self.logger.log(logging.DEBUG, f\"get_grid_values: extract value from closest node found\")\n closest_data = closest_node.extract_node_value(('depth', 'uncertainty'))\n if method == 'local':\n node_data = node.extract_closest_node_value(closest_data[0], closest_data[1], value)\n elif method == 'posterior':\n node_data = node.extract_posterior_weighted_node_value(closest_data[0], closest_data[1], value)\n elif method == 'prior':\n node_data = node.extract_node_value(value)\n elif method == 'predicted':\n node_data = node.extract_closest_node_value(node.predicted_depth, node.predicted_variance, value)\n for cnt, node_value in enumerate(node_data):\n data[cnt][row, col] = node_value\n return data\n\n def get_grid_depth(self, method: str = 'local'):\n \"\"\"\n Shortcut for get_grid_values, if you are only interested in depth.\n\n Parameters\n ----------\n method\n one of 'local', 'posterior', 'prior', 'predicted'. 
See get_grid_values for more info.\n\n Returns\n -------\n np.ndarray\n 2d numpy array of node depth values\n \"\"\"\n\n if method == 'local':\n return self.get_grid_values(('depth',), 'local')[0]\n elif method == 'posterior':\n return self.get_grid_values(('depth',), 'posterior')[0]\n elif method == 'prior':\n return self.get_grid_values(('depth',), 'prior')[0]\n elif method == 'predicted':\n return self.get_grid_values(('depth',), 'predicted')[0]\n\n def get_grid_uncertainty(self, method: str = 'local'):\n \"\"\"\n Shortcut for get_grid_values, if you are only interested in uncertainty.\n\n Parameters\n ----------\n method\n one of 'local', 'posterior', 'prior', 'predicted'. See get_grid_values for more info.\n\n Returns\n -------\n np.ndarray\n 2d numpy array of node uncertainty values\n \"\"\"\n\n if method == 'local':\n return self.get_grid_values(('uncertainty',), 'local')[0]\n elif method == 'posterior':\n return self.get_grid_values(('uncertainty',), 'posterior')[0]\n elif method == 'prior':\n return self.get_grid_values(('uncertainty',), 'prior')[0]\n elif method == 'predicted':\n return self.get_grid_values(('uncertainty',), 'predicted')[0]\n\n def get_grid_ratio(self, method: str = 'local'):\n \"\"\"\n Shortcut for get_grid_values, if you are only interested in ratio.\n\n Parameters\n ----------\n method\n one of 'local', 'posterior', 'prior', 'predicted'. See get_grid_values for more info.\n\n Returns\n -------\n np.ndarray\n 2d numpy array of node ratio values\n \"\"\"\n\n if method == 'local':\n return self.get_grid_values(('ratio',), 'local')[0]\n elif method == 'posterior':\n return self.get_grid_values(('ratio',), 'posterior')[0]\n elif method == 'prior':\n return self.get_grid_values(('ratio',), 'prior')[0]\n elif method == 'predicted':\n return self.get_grid_values(('ratio',), 'predicted')[0]\n\n def get_grid_number_hypotheses(self):\n \"\"\"\n Shortcut for get_grid_values, if you are only interested in the number of hypotheses. This is much faster than\n using get_grid_values if you only want hypotheses count, as it skips the expensive hypothesis selection logic.\n\n Returns\n -------\n np.ndarray\n 2d numpy array of node hypothesis count\n \"\"\"\n\n # you could use self.get_grid_values(['n_hypotheses'], 'local') to get the answer\n # but this would do the extra logic for determining the best hypothesis that is unnecessary here\n # let's shortcut this process to make it faster\n data = np.full((self.num_rows, self.num_columns), self.no_data_value)\n for row in range(self.num_rows):\n for col in range(self.num_columns):\n node = self.grid[row][col]\n data[row, col] = node.return_number_of_hypotheses()\n return data\n\n def get_grid_depth_and_uncertainty(self, method: str = 'local'):\n \"\"\"\n Shortcut for get_grid_values, if you are only interested in depth and uncertainty.\n\n Parameters\n ----------\n method\n one of 'local', 'posterior', 'prior', 'predicted'. 
See get_grid_values for more info.\n\n Returns\n -------\n list\n list of 2d numpy array of node depth and uncertainty values, in that order\n \"\"\"\n\n if method == 'local':\n return self.get_grid_values(('depth', 'uncertainty',), 'local')\n elif method == 'posterior':\n return self.get_grid_values(('depth', 'uncertainty',), 'posterior')\n elif method == 'prior':\n return self.get_grid_values(('depth', 'uncertainty',), 'prior')\n elif method == 'predicted':\n return self.get_grid_values(('depth', 'uncertainty',), 'predicted')\n\n\ndef run_cube_gridding(depth: np.ndarray, horizontal_uncertainty: np.ndarray, vertical_uncertainty: np.ndarray,\n easting: np.ndarray, northing: np.ndarray, num_columns: int, num_rows: int, minimum_easting: float,\n maximum_northing: float, method: str, iho_order: str, grid_resolution_x: float, grid_resolution_y: float,\n **kwargs):\n \"\"\"\n Entrance point in numba_cube, run this to run Cube.\n\n Grid contains the 2d list of CubeNodes which themselves contain the list of hypotheses. Currently an issue with\n jitclass that this class can't be returned from an njit function, you get a pickle/serialization error, due to the\n nested jitclass.\n\n I've heard there is a way to accomplish this with structref in numba that would avoid this issue,\n I have not looked into this yet.\n\n Parameters\n ----------\n depth\n 1d array of depth values\n horizontal_uncertainty\n 1d array of 2sigma horiz uncertainty values\n vertical_uncertainty\n 1d array of 2sigma vert uncertainty values\n easting\n 1d array of UTM easting values for the soundings\n northing\n 1d array of UTM northing values for the soundings\n num_columns\n number of columns in the grid\n num_rows\n number of rows in the grid\n minimum_easting\n minimum easting value for the grid to determine origin\n maximum_northing\n maximum northing value for the grid to determine origin\n method\n method to use in determining the appropriate hypothesis value. 'local' to use the local spatial\n context to find the closest node with a single hypothesis and use that hypothesis depth to find the nearest\n hypothesis in terms of depth in the current node. 'prior' to use the hypothesis with the most points\n associated with it. 'posterior' to combine both prior and local methods to form an approximate Bayesian\n posterior distribution. 'predict' to get the hypothesis closest to the predicted depth associated with\n each node.\n iho_order\n string representation of one of the IHO order categories, i.e. 
'special' or 'order1a'\n grid_resolution_x\n grid resolution in easting (column) direction in meters\n grid_resolution_y\n grid resolution in northing (row) direction in meters\n kwargs\n keyword arguments used to modify cube parameters\n\n Returns\n -------\n np.ndarray\n gridded depth values of shape (rows, columns) for the grid\n np.ndarray\n gridded uncertainty values of shape (rows, columns) for the grid\n np.ndarray\n gridded ratio values of shape (rows, columns) for the grid\n np.ndarray\n gridded hypothesis count values of shape (rows, columns) for the grid\n \"\"\"\n\n cp = CubeParameters()\n cp.initialize(iho_order=iho_order, grid_resolution_x=grid_resolution_x, grid_resolution_y=grid_resolution_y)\n for kpam, kval in kwargs.items():\n if kpam in cp.__dir__():\n setattr(cp, kpam, kval)\n if method in ['local', 'posterior', 'prior', 'predicted']:\n cg = CubeGrid(minimum_easting=minimum_easting, maximum_northing=maximum_northing, num_rows=num_rows,\n num_columns=num_columns, resolution_x=grid_resolution_x, resolution_y=grid_resolution_y,\n param=cp, use_queue=True, debug=False)\n cg.insert_points(depth, horizontal_uncertainty, vertical_uncertainty, easting, northing)\n depth_grid, uncertainty_grid, ratio_grid, numhyp_grid = cg.get_grid_values(('depth', 'uncertainty', 'ratio', 'n_hypotheses'), method=method)\n else:\n raise NotImplementedError(f\"run_cube_gridding: {method} not supported, expected one of 'local', 'posterior', 'prior', 'predicted'\")\n return depth_grid, uncertainty_grid, ratio_grid, numhyp_grid\n\n\nif __name__ == '__main__':\n from time import perf_counter\n starttime = perf_counter()\n\n print('****Start****')\n _numpoints = 100000\n _x = np.random.uniform(low=403744.0, high=403776.0, size=_numpoints)\n _y = np.random.uniform(low=4122665.0, high=4122688.0, size=_numpoints)\n _z = np.random.uniform(low=13.0, high=15.0, size=_numpoints)\n _tvu = np.random.uniform(low=0.1, high=1.0, size=_numpoints)\n _thu = np.random.uniform(low=0.3, high=1.3, size=_numpoints)\n _numrows, _numcols = (32, 32)\n _resolution_x, _resolution_y = (1.0, 1.0)\n _depth_grid, _uncertainty_grid, _ratio_grid, _numhyp_grid = run_cube_gridding(_z, _thu, _tvu, _x, _y, _numcols,\n _numrows,\n min(_x), max(_y), 'local', 'order1a',\n _resolution_x, _resolution_y)\n\n endtime = perf_counter()\n print('****CUBE complete: {}****'.format((endtime - starttime)))"
] |
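The flush_queue method in the CubeNode code above drains the sorted queue median-outward rather than front-to-back. Below is a minimal standalone sketch of just that index walk (the function name and the assert are illustrative additions, not part of the record); it reproduces the [4, 5, 3, 6, 2, 7, 1, 8, 0, 9] extraction order quoted in the docstring for a 10-point queue.

def flush_order(n_queued: int) -> list:
    """Median-outward extraction order used by flush_queue."""
    scale = 1
    if n_queued % 2 == 0:            # even: start just left of centre, step right first
        ex_pt = n_queued // 2 - 1
        direction = 1
    else:                            # odd: start at the exact median, step left first
        ex_pt = n_queued // 2
        direction = -1
    order = []
    while 0 <= ex_pt < n_queued:     # both bounds guarded in this standalone version
        order.append(ex_pt)
        ex_pt += direction * scale   # widen the step after every extraction
        direction = -direction
        scale += 1
    return order

assert flush_order(10) == [4, 5, 3, 6, 2, 7, 1, 8, 0, 9]

Because the queue is kept depth-sorted, this order alternates shallow/deep around the median, which is why the original loop only needs to test for stepping off the left-hand side.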
[
[
"numpy.log",
"numpy.sqrt",
"numpy.isnan",
"numpy.full",
"numpy.float32",
"numpy.floor",
"numpy.random.uniform",
"numpy.array",
"numpy.exp"
]
] |
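choose_hypothesis in the record above reports a hypothesis strength ratio of max(0.0, max_hypothesis_ratio - best_count / second_count), so 0.0 means the winning hypothesis dwarfs its nearest rival and larger values mean the competition is close. A small sketch of that arithmetic follows; the 5.0 used for max_hypothesis_ratio is an assumed placeholder, since the attribute's default is not visible in this excerpt.

def hypothesis_strength(max_hypothesis_ratio: float, best_count: int, second_count: int) -> float:
    # 0.0 -> convinced; larger -> competing hypotheses have similar point counts
    if not (best_count and second_count):
        return 0.0
    return max(0.0, max_hypothesis_ratio - best_count / second_count)

print(hypothesis_strength(5.0, 40, 5))   # 0.0  (8:1 dominance)
print(hypothesis_strength(5.0, 12, 10))  # 3.8  (only 1.2:1, weak separation)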
filesmuggler/cutter_node
|
[
"c46f6c6033661c80145dcb69a4fb78177f6ee30f"
] |
[
"src/nowicki.py"
] |
[
"#!/usr/bin/env python\n\nimport sys,time\n\nimport rospy\nimport roslib\nimport numpy as np\n\nfrom cv_bridge import CvBridge, CvBridgeError\n\nimport cv2\n\nimport message_filters\n\nfrom sensor_msgs.msg import Image, CameraInfo\nfrom std_msgs.msg import Header\n\nclass CutterNode:\n def __init__(self):\n self.image = np.zeros([480,640])\n self.depth = np.zeros([480,640])\n self.depth_output = np.zeros([480,640])\n\n self.depth_timestamp = 0\n self.header = Header()\n self.debbbug = Image()\n\n '''Initialize ros publisher, ros subscriber'''\n # topic where we publish\n self.depth_aligned_pub = rospy.Publisher(\"/cutter_node_align_depth\",Image, queue_size=10)\n # cv bridge\n self.cv_bridge = CvBridge()\n # subscribed Topic\n self.image_subscriber = rospy.Subscriber(\"/grabcut\",Image, self.callback_image,queue_size=1)\n self.depth_subscriber = rospy.Subscriber(\"/align_depth\",Image,self.callback_depth,queue_size=1)\n \n\n def callback_image(self,data):\n # filter image elementwise numpy\n try:\n cv_image = self.cv_bridge.imgmsg_to_cv2(data, \"mono8\")\n self.image = cv_image\n except CvBridgeError as e:\n print(e)\n \n # compare two images\n\n ##### TO DO HERE #####\n\n self.depth_output = np.array(np.zeros([480,640]), dtype = np.dtype('f4'))\n ret,thresh1 = cv2.threshold(cv_image,10.0,255.0,cv2.THRESH_BINARY)\n thresh1_norm = cv2.normalize(thresh1,thresh1,0,1,cv2.NORM_MINMAX)\n # self.depth_output = thresh1_norm * self.depth\n self.depth_output = self.depth\n #self.depth_output = cv2.normalize(depth_out, depth_out, 0, 1, cv2.NORM_MINMAX)\n #self.depth_output = np.float32(self.depth_output)\n\n # self.depth_output = np.array(np.zeros([480,640]), dtype = np.dtype('f4'))\n # ret,thresh1 = cv2.threshold(cv_image,10.0,255.0,cv2.THRESH_BINARY)\n # thresh1_norm = cv2.normalize(thresh1,thresh1,0,1,cv2.NORM_MINMAX)\n # thresh1_norm_32 = np.float32(thresh1_norm)\n # depth_out = np.multiply(self.depth,thresh1_norm_32)\n # self.depth_output = cv2.normalize(depth_out, depth_out, 0, 255, cv2.NORM_MINMAX)\n # self.depth_output = np.float32(depth_out)\n print(\"x\")\n\n\n ##### END TO DO #####\n \n try:\n self.align_message = self.cv_bridge.cv2_to_imgmsg(self.depth_output, \"16UC1\")\n self.align_message.header.stamp = self.depth_timestamp\n self.align_message.header.frame_id = \"map\"\n self.align_message.header = self.header\n self.depth_aligned_pub.publish(self.align_message)\n except CvBridgeError as e:\n print(e)\n cv2.imshow(\"cutter_node_depth_output\", self.depth_output)\n cv2.imshow(\"cutter_node_mask\", thresh1_norm)\n cv2.waitKey(3)\n\n def callback_depth(self,data):\n # filter image elementwise numpy\n try:\n self.debbbug = data\n\n cv_image = self.cv_bridge.imgmsg_to_cv2(data, \"16UC1\")\n self.depth_timestamp = data.header.stamp\n self.header = data.header\n # Convert the depth image to a Numpy array since most cv2 functions require Numpy arrays.\n # cv_image_array = np.array(cv_image, dtype = np.dtype('f4'))\n # Normalize the depth image to fall between 0 (black) and 1 (white) \n #cv_image_norm = cv2.normalize(cv_image_array, cv_image_array, 0, 1, cv2.NORM_MINMAX)\n # cv_image_norm = np.float32(cv_image_array)\n self.depth = cv_image\n\n # cv_image = self.cv_bridge.imgmsg_to_cv2(data, \"32FC1\")\n # self.depth_timestamp = data.header.stamp\n # Convert the depth image to a Numpy array since most cv2 functions require Numpy arrays.\n #cv_image_array = np.array(cv_image, dtype = np.dtype('f4'))\n # Normalize the depth image to fall between 0 (black) and 1 (white) \n # cv_image_norm = 
cv2.normalize(cv_image_array, cv_image_array, 0, 1, cv2.NORM_MINMAX)\n # cv_image_norm = np.float32(cv_image_norm)\n #self.depth = cv_image_norm\n #self.depth = cv_image_array\n except CvBridgeError as e:\n print(\"cv bridge: \",e)\n\n def cameras_callback(self, camera_msg, depth_msg,image_msg):\n timestamp_depth = depth_msg.header.stamp\n timestamp_camera = camera_msg.header.stamp\n timestamp_image = image_msg.header.stamp\n #print(\"D: \", timestamp_depth, \" C: \", timestamp_camera, \" difference: \", timestamp_depth - timestamp_camera)\n\n # filter image elementwise numpy DEPTH\n try:\n depth_cv_image = self.cv_bridge.imgmsg_to_cv2(depth_msg, \"32FC1\")\n depth_cv_image_array = np.array(depth_cv_image, dtype = np.dtype('f4')) \n depth_cv_image_norm = cv2.normalize(depth_cv_image_array, depth_cv_image_array, 0, 1, cv2.NORM_MINMAX)\n depth_cv_image_norm = np.float32(depth_cv_image_norm)\n self.depth = depth_cv_image_norm\n except CvBridgeError as e:\n print(\"cv bridge: \",e)\n\n # filter image elementwise numpy IMAGE\n try:\n image_cv_image = self.cv_bridge.imgmsg_to_cv2(image_msg, \"mono8\")\n self.image = image_cv_image\n except CvBridgeError as e:\n print(e)\n \n # compare two images\n\n self.depth_output = np.array(np.zeros([480,640]), dtype = np.dtype('f4'))\n\n ret,thresh1 = cv2.threshold(image_cv_image,10.0,255.0,cv2.THRESH_BINARY)\n \n depth_out = thresh1 * self.depth \n\n depth_out = np.float32(depth_out)\n\n self.depth_output = cv2.normalize(depth_out, depth_out, 0, 1, cv2.NORM_MINMAX)\n\n self.depth_output = np.float32(self.depth_output)\n \n try:\n self.align_message = self.cv_bridge.cv2_to_imgmsg(self.depth_output, \"32FC1\")\n self.align_message.header.stamp = timestamp_camera\n self.align_message.header.frame_id = \"map\"\n self.depth_aligned_pub.publish(self.align_message)\n except CvBridgeError as e:\n print(e)\n \n \n # pub_odom.publish(odom_msg)\n # pub_pointcloud.publish(point_cloud2_msg)\n \n\ndef main(args):\n ## debug\n cv2.namedWindow('cutter_node_depth_output')\n cv2.namedWindow('cutter_node_mask')\n rospy.init_node('cutter_node',anonymous=True)\n cn = CutterNode()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down ROS cutter node\")\n ## debug\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main(sys.argv)"
] |
[
[
"numpy.dtype",
"numpy.zeros",
"numpy.float32"
]
] |
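The cutter node's callbacks binarize a grayscale mask from /grabcut and use it to gate the aligned depth image. A minimal non-ROS sketch of that mask-and-multiply step, with synthetic arrays standing in for the subscribed topics:

import cv2
import numpy as np

mask_src = np.random.randint(0, 256, (480, 640), dtype=np.uint8)   # stand-in for /grabcut
depth = np.random.randint(0, 5000, (480, 640), dtype=np.uint16)    # stand-in for /align_depth

# binarize: pixels brighter than 10 become 255, everything else 0
_, mask = cv2.threshold(mask_src, 10, 255, cv2.THRESH_BINARY)
# scale the mask to {0, 1} so multiplying leaves masked-in depth values unchanged
mask01 = (mask // 255).astype(np.uint16)
masked_depth = depth * mask01   # depth outside the grabcut region is zeroed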
Aniket-Gujarathi/d2-net
|
[
"c716d37087390af04c8843452dadfa47a64cfbe2"
] |
[
"SuperGlue-pytorch/load_data_D2net.py"
] |
[
"import numpy as np\nimport torch\nimport os\nimport cv2\nimport math\nimport datetime\nimport random\nimport sys\nsys.path.append('../')\nfrom lib.save_features import extract\nfrom lib.model_test import D2Net\nfrom lib.model_test import D2Net\nfrom lib.pyramid import process_multiscale\n\nfrom scipy.spatial.distance import cdist\nfrom torch.utils.data import Dataset\n\nnp.random.seed(0)\n\n# CUDA\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\nclass SparseDataset(Dataset):\n \"\"\"Sparse correspondences dataset.\"\"\"\n\n def __init__(self, train_path, nfeatures):\n\n self.files = []\n self.files += [train_path + f for f in os.listdir(train_path)]\n\n self.nfeatures = nfeatures\n self.sift = cv2.xfeatures2d.SIFT_create(nfeatures=self.nfeatures)\n self.matcher = cv2.BFMatcher_create(cv2.NORM_L1, crossCheck=False)\n\n # Creating CNN model\n self.model = D2Net(\n \tmodel_file='/home/udit/d2-net/checkpoints/checkpoint_road_more/d2.15.pth',\n \tuse_relu=True,\n \tuse_cuda=use_cuda\n )\n\n self.device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n def __len__(self):\n return len(self.files)\n\n def randomH(self, img1, min=0, max=360):\n img1 = np.array(img1)\n width, height = img1.shape\n theta = np.random.randint(low=min, high=max) * (np.pi / 180)\n Tx = width / 2\n Ty = height / 2\n sx = random.uniform(-1e-2, 1e-2)\n sy = random.uniform(-1e-2, 1e-2)\n p1 = random.uniform(-1e-4, 1e-4)\n p2 = random.uniform(-1e-4, 1e-4)\n\n alpha = np.cos(theta)\n beta = np.sin(theta)\n\n He = np.matrix([[alpha, beta, Tx * (1 - alpha) - Ty * beta], [-beta, alpha, beta * Tx + (1 - alpha) * Ty], [0, 0, 1]])\n Ha = np.matrix([[1, sy, 0], [sx, 1, 0], [0, 0, 1]])\n Hp = np.matrix([[1, 0, 0], [0, 1, 0], [p1, p2, 1]])\n\n H = He @ Ha @ Hp\n\n img2 = cv2.warpPerspective(img1, H, dsize=(width, height))\n\n\t\t#cv2.imshow(\"Image\", img2)\n\t\t#cv2.waitKey(0)\n\n return img2, H\n\n def __getitem__(self, idx):\n file_name = self.files[idx]\n img1 = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)\n img2, M = self.randomH(img1, min=0, max=360)\n\n feat1 = extract(img1, self.model, self.device)\n feat2 = extract(img2, self.model, self.device)\n kp1, descs1, scores1 = feat1['keypoints'], feat1['descriptors'], feat1['scores']\n kp2, descs2, scores2 = feat2['keypoints'], feat2['descriptors'], feat2['scores']\n kp1 = np.delete(kp1, 2, 1)\n kp2 = np.delete(kp2, 2, 1)\n\n # limit the number of keypoints\n kp1_num = min(self.nfeatures, len(kp1))\n\n kp2_num = min(self.nfeatures, len(kp2))\n kp1 = kp1[:kp1_num]\n kp2 = kp2[:kp2_num]\n\n kp1_np = np.array([(kp[0], kp[1]) for kp in kp1])\n kp2_np = np.array([(kp[0], kp[1]) for kp in kp2])\n\n # skip this image pair if no keypoints detected in image\n if len(kp1) < 1 or len(kp2) < 1:\n return{\n 'keypoints0': torch.zeros([0, 0, 2], dtype=torch.double),\n 'keypoints1': torch.zeros([0, 0, 2], dtype=torch.double),\n 'descriptors0': torch.zeros([0, 2], dtype=torch.double),\n 'descriptors1': torch.zeros([0, 2], dtype=torch.double),\n 'image0': img1,\n 'image1': img2,\n 'file_name': file_name\n }\n\n # confidence of each key point\n # scores1_np = np.array([kp.response for kp in kp1])\n # scores2_np = np.array([kp.response for kp in kp2])\n\n kp1_np = kp1_np[:kp1_num, :]\n kp2_np = kp2_np[:kp2_num, :]\n descs1 = descs1[:kp1_num, :]\n descs2 = descs2[:kp2_num, :]\n scores1_np = scores1[:kp1_num]\n scores2_np = scores2[:kp2_num]\n\n # obtain the matching matrix of the image pair\n matched = self.matcher.match(descs1, descs2)\n 
kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :]\n dists = cdist(kp1_projected, kp2_np)\n\n min1 = np.argmin(dists, axis=0)\n min2 = np.argmin(dists, axis=1)\n\n min1v = np.min(dists, axis=1)\n min1f = min2[min1v < 3]\n\n xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0]\n matches = np.intersect1d(min1f, xx)\n\n missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches])\n missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches)\n\n MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]])\n MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)])\n MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]])\n all_matches = np.concatenate([MN, MN2, MN3], axis=1)\n\n kp1_np = kp1_np.reshape((1, -1, 2))\n kp2_np = kp2_np.reshape((1, -1, 2))\n descs1 = np.transpose(descs1 / 256.)\n descs2 = np.transpose(descs2 / 256.)\n\n img1 = torch.from_numpy(img1/255.).double()[None].cuda()\n img2 = torch.from_numpy(img2/255.).double()[None].cuda()\n print(kp1_np)\n return{\n 'keypoints0': list(kp1_np),\n 'keypoints1': list(kp2_np),\n 'descriptors0': list(descs1),\n 'descriptors1': list(descs2),\n 'scores0': list(scores1_np),\n 'scores1': list(scores2_np),\n 'image0': img1,\n 'image1': img2,\n 'all_matches': list(all_matches),\n 'file_name': file_name\n }\n"
] |
[
[
"numpy.matrix",
"numpy.random.seed",
"numpy.min",
"torch.zeros",
"numpy.arange",
"numpy.cos",
"scipy.spatial.distance.cdist",
"numpy.sin",
"numpy.concatenate",
"numpy.intersect1d",
"numpy.delete",
"torch.from_numpy",
"numpy.argmin",
"torch.cuda.is_available",
"numpy.transpose",
"torch.device",
"numpy.array",
"numpy.random.randint"
]
] |
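The __getitem__ above derives ground-truth correspondences by warping image-1 keypoints with the sampled homography (cv2.perspectiveTransform) and keeping mutual nearest neighbours that fall within 3 px. A simplified sketch of that mutual-NN filter on synthetic points; the record's own index bookkeeping (min1f, xx, intersect1d) differs slightly, so treat this as the intended idea rather than a line-for-line port.

import numpy as np
from scipy.spatial.distance import cdist

kp1_projected = np.random.rand(50, 2) * 100            # image-1 keypoints warped into image 2
kp2 = kp1_projected + np.random.randn(50, 2) * 0.5     # noisy counterparts in image 2

dists = cdist(kp1_projected, kp2)
nn12 = np.argmin(dists, axis=1)   # best image-2 match for each image-1 point
nn21 = np.argmin(dists, axis=0)   # best image-1 match for each image-2 point

# keep pairs that are mutual nearest neighbours and reproject within 3 px
matches = [(i, nn12[i]) for i in range(len(nn12))
           if nn21[nn12[i]] == i and dists[i, nn12[i]] < 3]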
goldblum/FeatureClustering
|
[
"55a1b233ccbdfad3e7f9be26006eccdfb76af28b"
] |
[
"models/ResNet12_embedding.py"
] |
[
"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom models.dropblock import DropBlock\n\n# This ResNet network was designed following the practice of the following papers:\n# TADAM: Task dependent adaptive metric for improved few-shot learning (Oreshkin et al., in NIPS 2018) and\n# A Simple Neural Attentive Meta-Learner (Mishra et al., in ICLR 2018).\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False, block_size=1):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.LeakyReLU(0.1)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.maxpool = nn.MaxPool2d(stride)\n self.downsample = downsample\n self.stride = stride\n self.drop_rate = drop_rate\n self.num_batches_tracked = 0\n self.drop_block = drop_block\n self.block_size = block_size\n self.DropBlock = DropBlock(block_size=self.block_size)\n\n def forward(self, x):\n self.num_batches_tracked += 1\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n out = self.maxpool(out)\n \n if self.drop_rate > 0:\n if self.drop_block == True:\n feat_size = out.size()[2]\n keep_rate = max(1.0 - self.drop_rate / (20*2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)\n gamma = (1 - keep_rate) / self.block_size**2 * feat_size**2 / (feat_size - self.block_size + 1)**2\n out = self.DropBlock(out, gamma=gamma)\n else:\n out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)\n\n return out\n \nclass ResNet(nn.Module):\n\n def __init__(self, block, keep_prob=1.0, avg_pool=False, drop_rate=0.0, dropblock_size=5):\n self.inplanes = 3\n super(ResNet, self).__init__()\n\n self.layer1 = self._make_layer(block, 64, stride=2, drop_rate=drop_rate)\n self.layer2 = self._make_layer(block, 160, stride=2, drop_rate=drop_rate)\n self.layer3 = self._make_layer(block, 320, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n self.layer4 = self._make_layer(block, 640, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n if avg_pool:\n self.avgpool = nn.AvgPool2d(2, stride=1)\n self.keep_prob = keep_prob\n self.keep_avg_pool = avg_pool\n self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)\n self.drop_rate = drop_rate\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n 
layers.append(block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size))\n self.inplanes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n if self.keep_avg_pool:\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n return x\n\n\ndef resnet12(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-12 model.\n \"\"\"\n model = ResNet(BasicBlock, keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
]
] |
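BasicBlock above converts a linearly decaying keep rate into the DropBlock drop probability gamma (DropBlock: Ghiasi et al., 2018). The arithmetic in isolation, with the function name and example values as illustrative additions:

def dropblock_gamma(drop_rate: float, num_batches_tracked: int,
                    block_size: int, feat_size: int) -> float:
    # keep_rate ramps linearly from 1.0 down to 1 - drop_rate over 20 * 2000 batches
    keep_rate = max(1.0 - drop_rate / (20 * 2000) * num_batches_tracked,
                    1.0 - drop_rate)
    # Bernoulli seed rate chosen so dropping block_size x block_size squares
    # removes roughly (1 - keep_rate) of the feature map
    return ((1 - keep_rate) / block_size ** 2
            * feat_size ** 2 / (feat_size - block_size + 1) ** 2)

# halfway through the ramp on a 10x10 feature map with 5x5 blocks, drop_rate 0.1:
print(dropblock_gamma(0.1, 20000, 5, 10))   # ~0.00556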
mathdugre/RUIS
|
[
"b2013e43b9178a3f990f609cdb2f021486e7f901"
] |
[
"report.py"
] |
[
"import datetime\nimport re\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\ndef timeTicks(x, pos): \n d = datetime.timedelta(microseconds=x//1000) \n return str(d)\n\ndef analysis(data_folder, skiprows=14, *, MIN_CPU_TRESHOLD=10, MIN_DISK_TRESHOLD=100, MIN_NET_TRESHOLD=100, MIN_NFS_TRESHOLD=100):\n # Threshold for the timing only; the plots show all data.\n # This is to avoid calculating resource usage when noise occurs.\n \n df_cpu = pd.read_csv(f\"{data_folder}/cpu.csv\", skiprows=skiprows)\n df_disk = pd.read_csv(f\"{data_folder}/dsk.csv\", skiprows=skiprows)\n df_net = pd.read_csv(f\"{data_folder}/net.csv\", skiprows=skiprows)\n df_numa = pd.read_csv(f\"{data_folder}/numa.csv\", skiprows=skiprows)\n df_tab = pd.read_csv(f\"{data_folder}/tab.csv\", skiprows=skiprows)\n\n df_cpu[\"Timestamp\"] = pd.to_datetime(df_cpu[\"#Date\"].map(str) + \"-\" + df_cpu[\"Time\"], format=\"%Y%m%d-%H:%M:%S.%f\")\n interval_time = (df_cpu[\"Timestamp\"][len(df_cpu)-1] - df_cpu[\"Timestamp\"][0]) / len(df_cpu)\n interval_seconds = interval_time / np.timedelta64(1, 's')\n df_cpu[\"Relative Timestamp\"] = df_cpu.index * interval_time\n df_disk[\"Relative Timestamp\"] = df_disk.index * interval_time\n df_net[\"Relative Timestamp\"] = df_net.index * interval_time\n df_numa[\"Relative Timestamp\"] = df_numa.index * interval_time\n df_tab[\"Relative Timestamp\"] = df_tab.index * interval_time\n \n Gb = 1024 ** 2\n\n########\n# CPU #\n#######\n n_cpu = (len(df_cpu.columns) - 2) // 12\n cpus = [f\"[CPU:{i}]\" for i in range(n_cpu)]\n total_cpu = df_cpu[[\"Relative Timestamp\"] + list(map(lambda x: x+\"Totl%\", cpus))]\n total_cpu = total_cpu.set_index(\"Relative Timestamp\")\n \n cpu_time = (sum([1 for row in (total_cpu >= MIN_CPU_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n\n print(f\"\"\"\n====================\n CPU Analysis\n====================\nTotal CPU core:\n{n_cpu}\n\nTotal CPU time (seconds):\n{((total_cpu >= MIN_CPU_TRESHOLD).values.sum() * interval_time) / np.timedelta64(1, 's'):0.3f}\n\nParallel CPU time (seconds):\n{cpu_time:0.3f}\n\nMakes span (seconds):\n{(df_cpu[\"Timestamp\"][len(df_cpu[\"Timestamp\"])-1] - df_cpu[\"Timestamp\"][0]) / np.timedelta64(1, 's'):0.3f}\n\n\"\"\")\n fig = plt.figure(figsize=(20,5)) \n ax = fig.add_subplot(111)\n cpu_avg = total_cpu[total_cpu.columns].apply(lambda x: sum(x)/len(total_cpu.columns), axis=1)\n ax.plot(total_cpu.index, cpu_avg)\n formatter = matplotlib.ticker.FuncFormatter(timeTicks) \n ax.xaxis.set_major_formatter(formatter)\n ax.set_ylim([0, 110])\n plt.title(\"CPU usage\")\n plt.ylabel(\"Average load (%)\")\n plt.xlabel(\"Time (seconds)\")\n plt.show()\n\n \n############\n# Disk I/O #\n############\n df_disk = df_disk.set_index(\"Relative Timestamp\")\n df_disk = df_disk.dropna(how=\"all\", axis=1)\n disk_read_col = [c for c in df_disk.columns if c.startswith(\"[DSK:\") and c.endswith(\"RKBytes\")]\n disk_write_col = [c for c in df_disk.columns if c.startswith(\"[DSK:\") and c.endswith(\"WKBytes\")]\n diskIO = df_disk[disk_read_col + disk_write_col] * interval_seconds\n \n max_diskIO_transfer = diskIO.max().max() * 1.1 / Gb\n \n disk_data_read = diskIO[disk_read_col].sum() / Gb\n disk_data_write = diskIO[disk_write_col].sum() / Gb\n \n diskIO_time = (sum([1 for row in (diskIO >= MIN_DISK_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n diskIO_read_time = (sum([1 for row in 
(diskIO[disk_read_col] >= MIN_DISK_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n diskIO_write_time = (sum([1 for row in (diskIO[disk_write_col] >= MIN_DISK_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n \n print(f\"\"\"\n====================\n Disk I/O\n====================\nData transfer (Gb):\nRead:\n{disk_data_read.round(3).to_string().replace(\"KB\", \"GB\")}\n\nWrite:\n{disk_data_write.round(3).to_string().replace(\"KB\", \"GB\")}\n\nTotal: {disk_data_read.sum()+disk_data_write.sum():0.3f}\n\nTotal I/O time (seconds):\n{((diskIO >= MIN_DISK_TRESHOLD).values.sum() * interval_time) / np.timedelta64(1, 's'):0.3f}\n\nParallel I/O time (seconds):\n{diskIO_time:0.3f}\n\"\"\")\n \n fig = plt.figure(figsize=(20,5)) \n ax = fig.add_subplot(111)\n \n disk_read_serie = diskIO[disk_read_col].apply(lambda x: sum(x), axis=1)\n ax.plot(df_disk.index, disk_read_serie / Gb, label=\"Read\")\n\n disk_write_serie = diskIO[disk_write_col].apply(lambda x: sum(x), axis=1)\n ax.plot(df_disk.index, disk_write_serie / Gb, label=\"Write\")\n \n formatter = matplotlib.ticker.FuncFormatter(timeTicks) \n ax.xaxis.set_major_formatter(formatter)\n \n ax.set_ylim([0, max(max_diskIO_transfer, 0.1)])\n plt.title(\"Disk data transfer\")\n plt.ylabel(\"Gb\")\n plt.xlabel(\"Time (seconds)\")\n plt.legend()\n plt.show()\n \n \n###############\n# Network I/O #\n###############\n df_net = df_net.set_index(\"Relative Timestamp\")\n df_net = df_net.dropna(how=\"all\", axis=1)\n net_read_col = [c for c in df_net.columns if c.startswith(\"[NET:\") and c.endswith(\"RxKB\")]\n net_write_col = [c for c in df_net.columns if c.startswith(\"[NET:\") and c.endswith(\"TxKB\")]\n netIO = df_net[net_read_col + net_write_col] * interval_seconds\n \n max_netIO_transfer = netIO.max().max() * 1.1 / Gb\n \n net_data_read = netIO[net_read_col].sum() / Gb\n net_data_write = netIO[net_write_col].sum() / Gb\n \n netIO_time = (sum([1 for row in (netIO >= MIN_NET_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n netIO_read_time = (sum([1 for row in (netIO[net_read_col] >= MIN_NET_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n netIO_write_time = (sum([1 for row in (netIO[net_write_col] >= MIN_NET_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n \n print(f\"\"\"\n====================\n Network I/O\n====================\nData transfer (Gb):\nRead:\n{net_data_read.round(3).to_string().replace(\"KB\", \"GB\")}\n\nWrite:\n{net_data_write.round(3).to_string().replace(\"KB\", \"GB\")}\n\nTotal: {net_data_read.sum()+net_data_write.sum():0.3f}\n\nTotal I/O time (seconds):\n{((netIO >= MIN_NET_TRESHOLD).values.sum() * interval_time) / np.timedelta64(1, 's'):0.3f}\n\nParallel I/O time (seconds):\n{netIO_time:0.3f}\n\"\"\")\n\n fig = plt.figure(figsize=(20,5)) \n ax = fig.add_subplot(111)\n \n net_read_serie = netIO[net_read_col].apply(lambda x: sum(x), axis=1)\n ax.plot(df_net.index, net_read_serie / Gb, label=\"Read\")\n \n net_write_serie = netIO[net_write_col].apply(lambda x: sum(x), axis=1)\n ax.plot(df_net.index, net_write_serie / Gb, label='Write')\n \n formatter = matplotlib.ticker.FuncFormatter(timeTicks) \n ax.xaxis.set_major_formatter(formatter)\n \n ax.set_ylim([0, max(max_netIO_transfer, 0.1)])\n plt.title(\"Network data transfer\")\n plt.ylabel(\"Gb\")\n plt.xlabel(\"Time (seconds)\")\n plt.legend()\n plt.show()\n \n print(f\"\"\"\n====================\n Memory Usage\n====================\n\"\"\")\n \n max_memory = 
int((df_numa[\"[NUMA:0]Used\"] + df_numa[\"[NUMA:0]Free\"]).mean()) / Gb\n    \n    fig = plt.figure(figsize=(20,5)) \n    ax = fig.add_subplot(111)\n    \n    ax.axhline(max_memory, color=\"red\", label=\"Total Memory\")\n    ax.plot(df_numa[\"Relative Timestamp\"], df_numa[\"[NUMA:0]Used\"] / Gb, label=\"Used\")\n    ax.plot(df_numa[\"Relative Timestamp\"], df_numa[\"[NUMA:0]Free\"] / Gb, label=\"Free\")\n    \n    formatter = matplotlib.ticker.FuncFormatter(timeTicks) \n    ax.xaxis.set_major_formatter(formatter)\n    ax.get_yaxis().get_major_formatter().set_scientific(False)\n    \n    ax.set_ylim([0, max_memory * 1.1])\n    plt.title(\"Memory Usage\")\n    plt.ylabel(\"Gb\")\n    plt.xlabel(\"Time (seconds)\")\n    plt.legend()\n    plt.show()\n\n#######\n# NFS #\n########\n    df_tab = df_tab.set_index(\"Relative Timestamp\")\n    df_tab = df_tab.dropna(how=\"all\", axis=1)\n    nfs_read_col = [c for c in df_tab.columns if c.startswith(\"[NFS]Reads\")]\n    nfs_write_col = [c for c in df_tab.columns if c.startswith(\"[NFS]Writes\")]\n    nfsIO = df_tab[nfs_read_col + nfs_write_col] * interval_seconds\n    \n    max_nfsIO_transfer = nfsIO.max().max() * 1.1 / Gb\n    \n    nfs_data_read = nfsIO[nfs_read_col].sum() / Gb\n    nfs_data_write = nfsIO[nfs_write_col].sum() / Gb\n    \n    nfsIO_time = (sum([1 for row in (nfsIO >= MIN_NFS_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n    nfsIO_read_time = (sum([1 for row in (nfsIO[nfs_read_col] >= MIN_NFS_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n    nfsIO_write_time = (sum([1 for row in (nfsIO[nfs_write_col] >= MIN_NFS_TRESHOLD).values if any(row)]) * interval_time) / np.timedelta64(1, 's')\n    \n    print(f\"\"\"\n====================\n        NFS\n====================\nData transfer (Gb):\nRead:\n{nfs_data_read.round(3).to_string().replace(\"KB\", \"GB\")}\n\nWrite:\n{nfs_data_write.round(3).to_string().replace(\"KB\", \"GB\")}\n\nTotal: {nfs_data_read.sum()+nfs_data_write.sum():0.3f}\n\nTotal I/O time (seconds):\n{((nfsIO >= MIN_NFS_TRESHOLD).values.sum() * interval_time) / np.timedelta64(1, 's'):0.3f}\n\nParallel I/O time (seconds):\n{nfsIO_time:0.3f}\n\"\"\")\n    \n    fig = plt.figure(figsize=(20,5)) \n    ax = fig.add_subplot(111)\n    \n    nfs_read_serie = nfsIO[nfs_read_col].apply(lambda x: sum(x), axis=1)\n    ax.plot(df_tab.index, nfs_read_serie / Gb, label=\"Read\")\n\n    nfs_write_serie = nfsIO[nfs_write_col].apply(lambda x: sum(x), axis=1)\n    ax.plot(df_tab.index, nfs_write_serie / Gb, label=\"Write\")\n    \n    formatter = matplotlib.ticker.FuncFormatter(timeTicks) \n    ax.xaxis.set_major_formatter(formatter)\n    \n    ax.set_ylim([0, max(max_nfsIO_transfer, 0.1)])\n    plt.title(\"NFS data transfer\")\n    plt.ylabel(\"Gb\")\n    plt.xlabel(\"Time (seconds)\")\n    plt.legend()\n    plt.show()\n    \n\n###########\n# Summary #\n###########\n    print(\"\"\"\n====================\n      Summary\n====================\n\"\"\")\n    other = 0\n    donut_labels = []\n    donut_ratios = []\n\n    \n    label_ratio = {\n        \"CPU\": cpu_time,\n        \"Disk Read\": diskIO_read_time,\n        \"Disk Write\": diskIO_write_time,\n        \"Network Read\": netIO_read_time,\n        \"Network Write\": netIO_write_time,\n        \"NFS Read\": nfsIO_read_time,\n        \"NFS Write\": nfsIO_write_time,\n    }\n    total_time = sum([v for k, v in label_ratio.items()])\n    \n    for label, ratio in label_ratio.items():\n        if ratio < total_time * 0.1: # If less than 10% of the total, put in the 'other' category\n            other += ratio\n        else:\n            donut_ratios.append(ratio)\n            donut_labels.append(f\"{label}: {ratio:0.2f} seconds\")\n    \n    if other > 0:\n        donut_ratios.append(other)\n        donut_labels.append(f\"Other: {other:0.2f} seconds\")\n    \n    # Code 
reference: https://matplotlib.org/3.1.1/gallery/pie_and_polar_charts/pie_and_donut_labels.html\n fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect=\"equal\"))\n wedges, texts = ax.pie(donut_ratios, wedgeprops=dict(width=0.5), startangle=-40)\n\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"center\")\n\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. + p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n ax.annotate(donut_labels[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\n ax.set_title(\"Time distribution\")\n\n plt.show()\n "
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.timedelta64",
"numpy.sign",
"matplotlib.pyplot.ylabel",
"numpy.deg2rad",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.xlabel",
"pandas.set_option",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
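A note on the row above: analysis() is the whole public surface of report.py, and it only needs a folder of collectl-style CSV exports. A minimal sketch of driving it, where the folder name is an assumption, not part of the source (the TRESHOLD spelling of the keyword is taken verbatim from the code above):

    from report import analysis

    # "results/run-01" is a hypothetical folder holding cpu.csv, dsk.csv,
    # net.csv, numa.csv and tab.csv; skiprows trims the collectl preamble.
    analysis("results/run-01", skiprows=14, MIN_CPU_TRESHOLD=10)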
q1park/tempformer-xl
|
[
"05be261ba2221dd7fd7adc39df78941eaa4937c2"
] |
[
"pytorch/eval.py"
] |
[
"# coding: utf-8\nimport argparse\nimport time\nimport math\nimport os\n\nimport torch\n\nfrom pytorch.utils.data_utils import get_lm_corpus\nfrom pytorch.modules.xlmemories import XlMemories\nfrom utils.exp_utils import get_logger\n\nparser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')\nparser.add_argument('--data', type=str, default='../data/wikitext-103',\n help='location of the data corpus')\nparser.add_argument('--dataset', type=str, default='wt103',\n choices=['wt103', 'lm1b', 'enwik8', 'text8'],\n help='dataset name')\nparser.add_argument('--split', type=str, default='all',\n choices=['all', 'valid', 'test'],\n help='which split to evaluate')\nparser.add_argument('--batch_size', type=int, default=10,\n help='batch size')\nparser.add_argument('--tgt_len', type=int, default=5,\n help='number of tokens to predict')\nparser.add_argument('--ext_len', type=int, default=0,\n help='length of the extended context')\nparser.add_argument('--mem_len', type=int, default=0,\n help='length of the retained previous heads')\nparser.add_argument('--clamp_len', type=int, default=-1,\n help='max positional embedding index')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--work_dir', type=str, required=True,\n help='path to the work_dir')\nparser.add_argument('--no_log', action='store_true',\n help='do not log the eval result')\nparser.add_argument('--same_length', action='store_true',\n help='set same length attention with masking')\nargs = parser.parse_args()\nassert args.ext_len >= 0, 'extended context length must be non-negative'\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n# Get logger\nlogging = get_logger(os.path.join(args.work_dir, 'log.txt'),\n log_=not args.no_log)\n\n# Load dataset\ncorpus = get_lm_corpus(args.data, args.dataset)\nntokens = len(corpus.vocab)\n\nva_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len,\n device=device, ext_len=args.ext_len)\nte_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len,\n device=device, ext_len=args.ext_len)\n\n# Load the best saved model.\nwith open(os.path.join(args.work_dir, 'model.pt'), 'rb') as f:\n model = torch.load(f)\n\nmodel = model.to(device)\n\n\n\nlogging('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(\n args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))\n\nif args.clamp_len > 0:\n model.position.clamp_len = args.clamp_len\nif args.same_length:\n model.attn_mask.same_length = True\n model.attn_mask.tgt_len = args.tgt_len\n model.attn_mask.klen = args.tgt_len + args.mem_len\n model.attn_mask.attn_mask = model.attn_mask.make_mask()\n\n###############################################################################\n# Evaluation code\n###############################################################################\ndef evaluate(eval_iter):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n total_len, total_loss = 0, 0.\n start_time = time.time()\n with torch.no_grad():\n eval_memories = XlMemories(\n n_stream=1,\n n_layer=model.n_layer,\n tgt_len=args.tgt_len,\n mem_len=args.mem_len,\n ext_len=args.ext_len,\n dtype=next(model.parameters()).dtype\n )\n\n for idx, (data, target, seq_len) in enumerate(eval_iter):\n loss, new_eval_memory = model(data, target, eval_memories[0])\n eval_memories.update_memory_stream(stream_index=0, memory=new_eval_memory)\n\n loss = loss.mean()\n total_loss += seq_len * loss.item()\n total_len += seq_len\n total_time = 
time.time() - start_time\n logging('Time : {:.2f}s, {:.2f}ms/segment'.format(\n total_time, 1000 * total_time / (idx+1)))\n return total_loss / total_len\n\n# Run on test data.\nif args.split == 'all':\n test_loss = evaluate(te_iter)\n valid_loss = evaluate(va_iter)\nelif args.split == 'valid':\n valid_loss = evaluate(va_iter)\n test_loss = None\nelif args.split == 'test':\n test_loss = evaluate(te_iter)\n valid_loss = None\n\ndef format_log(loss, split):\n if args.dataset in ['enwik8', 'text8']:\n log_str = '| {0} loss {1:5.2f} | {0} bpc {2:9.5f} '.format(\n split, loss, loss / math.log(2))\n else:\n log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(\n split, loss, math.exp(loss))\n return log_str\n\nlog_str = ''\nif valid_loss is not None:\n log_str += format_log(valid_loss, 'valid')\nif test_loss is not None:\n log_str += format_log(test_loss, 'test')\n\nlogging('=' * 100)\nlogging(log_str)\nlogging('=' * 100)\n"
] |
[
[
"torch.device",
"torch.no_grad",
"torch.load"
]
] |
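For reference, format_log in the row above turns the mean evaluation loss into the headline metric; the arithmetic is just a transform of the loss in nats. A sketch with a made-up loss value:

    import math

    loss = 3.20                  # hypothetical mean eval loss (nats)
    print(loss / math.log(2))    # bits per character, reported for enwik8/text8
    print(math.exp(loss))        # perplexity, reported for wt103/lm1b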
ernovoseller/DuelingPosteriorSampling
|
[
"0b34db67bd20d664f73611608638e1e0a32faf30"
] |
[
"PSRL_in_RiverSwim.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nRun the posterior sampling RL algorithm (PSRL) in the RiverSwim environment.\n\nThe PSRL algorithm is described in \"(More) Efficient Reinforcement Learning via \nPosterior Sampling,\" by I. Osband, B. Van Roy, and D. Russo (2013). It learns \nfrom numerical rewards, rather than preferences.\n\"\"\"\n\nimport scipy.io as io\nimport os\n\nfrom Envs.RiverSwim import RiverSwimEnv\nfrom Learning_algorithms.PSRL_numerical_rewards import PSRL\n\n\n# Define constants:\ntime_horizon = 50\nnum_iter = 400 # Number of iterations of the learning algorithm. This is \n # twice the number of iterations of the preference-based\n # algorithms, since PSRL rolls out one trajectory/episode\n # per learning iteration, while the preference-based \n # algorithms roll out two; thus, the number of episodes is\n # kept consistent.\n\nrun_nums = 100 # Number of times to run the algorithm\n\n# Folder for saving results:\noutput_folder = 'PSRL/'\n\nif not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n# Hyperparameters to use for the PSRL algorithm:\nNG_params = [1, 1, 1, 1] # Prior parameters for normal-gamma reward model\n\n# Instantiate the environment:\nenv = RiverSwimEnv() \n\n# Run PSRL algorithm:\nfor run_num in range(run_nums): \n \n # String to use in status updates:\n run_str = '%s, run %i' % (NG_params, run_num)\n \n # Run algorithm:\n rewards = PSRL(time_horizon, NG_params, env, num_iter, run_str = run_str)\n \n # Save results from this algorithm run:\n output_filename = output_folder + 'Iter_' + str(num_iter) + '_params_' + \\\n str(NG_params[0]) + '_' + str(NG_params[1]) \\\n + '_' + str(NG_params[2]) + '_' + str(NG_params[3]) + \\\n '_run_' + str(run_num) + '.mat'\n \n io.savemat(output_filename, {'rewards': rewards, 'num_iter': num_iter, \n 'NG_params': NG_params})\n \n"
] |
[
[
"scipy.io.savemat"
]
] |
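Each run above is persisted as a single .mat file, so reading results back is one loadmat call. A sketch assuming the default settings, so that the hypothetical file name below follows the naming scheme built in the script (run 0, num_iter=400, NG_params=[1, 1, 1, 1]):

    import scipy.io as io

    data = io.loadmat('PSRL/Iter_400_params_1_1_1_1_run_0.mat')
    print(data['rewards'].shape, data['num_iter'], data['NG_params'])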
liqibp/rogerrferi
|
[
"4478f71ff44a9f5b17a52becf41f20caf97afab7"
] |
[
"Leitor_XML_v0.7 (GUI).py"
] |
[
"# Ler e Importar XML, e exportar para base MSSQL \n#\n# Importando os pacotes\nimport pandas as pd\nimport sqlalchemy\nimport pyodbc\nimport xml.etree.ElementTree as et\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.filedialog import askdirectory \nfrom tkinter import messagebox\nimport os\nimport time\nimport glob\nimport math as m\n#import re\n#from concurrent.futures import ThreadPoolExecutor\n\nclass Leitor:\n def __init__(self,master):\n self.master = master\n master.title('Leitor XML v0.7')\n\n # Variáveis auxiliares\n self.ns = {'aux' : 'http://www.portalfiscal.inf.br/nfe'}\n self.tnome = 'NFe_Base'\n self.listad = ['SQL Server Native Client 11.0','SQL Server Native Client 10.0','ODBC Driver 13 for SQL Server']\n self.arquivos = 0\n\n self.driver = tk.StringVar(master)\n self.driver.set(self.listad[0])\n\n self.statbar = tk.StringVar(master)\n self.statbar.set('')\n\n self.cb1 = tk.IntVar()\n self.cb1.set(1)\n\n self.cb2 = tk.IntVar()\n self.cb2.set(0)\n\n self.lcaminho = tk.Label(text='Insira o caminho dos arquivos XML:')\n self.lcaminho.grid(row=0)\n\n self.pathinsert = tk.Entry(root, width=80, borderwidth=2)\n self.pathinsert.grid(row=1) \n\n self.bpath = tk.Button(master, text='Pesquisar', command=self.get_cam, padx=5)\n self.bpath.grid(row=1,column=1)\n\n self.lserv = tk.Label(text='Insira o Servidor SQL:')\n self.lserv.grid(row=3)\n\n self.servinsert = tk.Entry(master, width=80, borderwidth=2)\n self.servinsert.grid(row=4)\n self.servinsert.insert(0,os.environ['COMPUTERNAME']) \n\n self.checkbox2= tk.Checkbutton(master, text='SQL Express', variable=self.cb2,command=self.set_express)\n self.checkbox2.grid(row=4,column=1)\n\n self.lbase = tk.Label(text='Insira a base para inserir os dados:')\n self.lbase.grid(row=5)\n\n self.baseinsert = tk.Entry(master, width=80, borderwidth=2)\n self.baseinsert.grid(row=6)\n self.baseinsert.insert(0,'DB_XML') \n\n self.checkbox1= tk.Checkbutton(master, text='Substituir?', variable=self.cb1)\n self.checkbox1.grid(row=6,column=1)\n\n self.dpadrao = tk.Label(text='Driver:')\n self.dpadrao.grid(row=9)\n\n self.Lista = ttk.Combobox(master,values=self.listad,textvariable=self.driver)\n self.Lista.config(width=78)\n self.Lista.grid(row=10)\n\n self.barprog = ttk.Progressbar(master,orient='horizontal',length=490,mode='determinate')\n self.barprog.grid(row=12,pady=10)\n\n self.status = tk.Label(textvariable=self.statbar)\n self.status.grid(row=13)\n\n self.bconnect = tk.Button(master, text='Importar XML', command=self.sql_connect)\n self.bconnect.grid(row=15,pady=10)\n #Abre janela do windows para usuário escolher caminho da pasta\n def get_cam(self):\n global directory\n global path\n global arquivos \n path = askdirectory()\n directory = os.fsencode(path)\n arquivos = len(glob.glob1(path,'*.xml'))\n self.pathinsert.delete(0,tk.END)\n self.pathinsert.insert(0,path)\n \n #adiciona /sqlexpress no fim do servidor\n def set_express(self):\n if self.cb2.get() == 1:\n servidorexp = self.servinsert.get()\n self.servinsert.delete(0,tk.END)\n self.servinsert.insert(0,servidorexp+str('\\\\sqlexpress'))\n self.servinsert.update()\n elif self.cb2.get() == 0:\n servidornexp = self.servinsert.get()\n servidornexp = servidornexp.replace('\\\\sqlexpress','')\n self.servinsert.delete(0,tk.END)\n self.servinsert.insert(0,servidornexp)\n self.servinsert.update()\n \n #Remove NFe canceladas encontradas durante a leitura do DF\n def remover_canc(self):\n global all_xml\n global canc_xml\n global df_all_xml\n global df_canc_xml\n df_all_xml = 
pd.DataFrame(all_xml)\n del all_xml\n df_canc_xml = pd.DataFrame(canc_xml)\n del canc_xml\n if df_canc_xml.empty == False:\n df_all_xml = pd.merge(df_all_xml,df_canc_xml, indicator=True,left_on='chNFe',right_on='canc_chNFe', how='left').query('_merge==\"left_only\"').drop(['_merge','canc_chNFe'], axis = 1)\n df_canc_xml.to_csv(os.path.join(path,'Cancelados.txt'),header = True,index = False,sep = '\\t')\n \n #Pondera data média do vencimento de acordo com o valor e prazo de cada duplicata\n def ponderar_venc(self):\n global df_all_xml\n global venc_xml\n df_venc_xml = pd.DataFrame(venc_xml)\n del venc_xml\n if df_canc_xml.empty == False: \n df_venc_xml['dhEmi'] = pd.to_datetime(df_venc_xml['dhEmi'])\n df_venc_xml['dVenc'] = pd.to_datetime(df_venc_xml['dVenc'])\n df_venc_xml['vDup'] = pd.to_numeric(df_venc_xml['vDup'])\n df_venc_xml['dias'] = (df_venc_xml['dVenc'] - df_venc_xml['dhEmi']).dt.days\n df_venc_xml['vDup_x_Dias'] = (df_venc_xml['vDup'] * df_venc_xml['dias'])\n df_venc_pond = df_venc_xml.filter(['chNFe_venc','dhEmi','vDup','vDup_x_Dias'])\n del df_venc_xml\n df_venc_pond = df_venc_pond.groupby(['chNFe_venc','dhEmi']).sum()\n df_venc_pond = df_venc_pond.reset_index() \n df_venc_pond['dPond'] = (df_venc_pond['vDup_x_Dias'] / df_venc_pond['vDup']).astype('int') \n df_venc_pond['dVenc'] = df_venc_pond['dhEmi'] + pd.to_timedelta(df_venc_pond['dPond'], unit='days')\n df_all_xml = pd.merge(df_all_xml,df_venc_pond[['chNFe_venc','dVenc']], left_on='chNFe', right_on = 'chNFe_venc', how='left').drop('chNFe_venc', axis = 1)\n\n\n #Connecta com o SQL Server utilizando o driver indicado na GUI\n def sql_connect(self):\n global engine\n global df_all_xml\n global df_canc_xml\n try:\n start = time.time()\n servidor = self.servinsert.get()\n sqldriver = ''\n if self.Lista.get() == 'SQL Server Native Client 11.0':\n sqldriver = 'SQL+Server+Native+Client+11.0'\n elif self.Lista.get() == 'SQL Server Native Client 10.0':\n sqldriver = 'SQL+Server+Native+Client+10.0'\n else:\n sqldriver = 'ODBC+Driver+13+for+SQL+Server'\n engine = sqlalchemy.create_engine('mssql://'+servidor+'/'+self.baseinsert.get()+'?driver='+sqldriver+'&trusted_connection=yes',fast_executemany=True)\n engine.connect()\n if arquivos == 0:\n raise Exception('Nenhum arquivo .xml foi encontrado na pasta')\n else:\n self.criar_tabela()\n self.ler(directory,arquivos)\n self.remover_canc()\n self.ponderar_venc()\n self.export()\n end = time.time()\n horas = m.floor(((end-start)/3600))\n minutos = m.floor(((end-start)%3600)/60)\n segundos = m.floor((end-start)%60)\n self.statbar.set(str(len(df_all_xml))+' linhas inseridas\\n'+'Tempo total: '+str(horas)+' hora(s) '+str(minutos)+' minuto(s) '+str(segundos)+ ' segundos.')\n messagebox.showinfo('Fim!','Importação finalizada!')\n del df_all_xml\n del df_canc_xml\n except Exception as e:\n #messagebox.showerror('Erro',e)\n raise\n\n #Cria tabela no servidor SQL para insersão dos dados\n def criar_tabela(self):\n if self.cb1.get() == 1:\n with engine.connect() as con:\n con.execute('DROP TABLE IF EXISTS '+self.tnome)\n con.execute('''\n CREATE TABLE [dbo].['''+self.tnome+'''](\n [index] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY,\n [Arq_ID] [bigint] NULL,\n [Itm_ID] [bigint] NULL,\n [chNFe] [varchar](44) NULL,\n [cUF] [varchar](2) NULL,\n [natOP] [varchar](max) NULL,\n [serie] [int] NULL,\n [nNF] [int] NULL,\n [dhEmi] [date] NULL,\n [dhSaiEnt] [date] NULL,\n [dVenc] [date] NULL,\n [tpNF] [int] NULL,\n [refNFe] [varchar](44) NULL,\n [finNFe] [int] NULL,\n [Emit_CNPJ] [varchar](14) NULL,\n 
[Emit_xNome] [varchar](200) NULL,\n [Emit_xFant] [varchar](200) NULL,\n [Emit_UF] [varchar](2) NULL,\n [Emit_cPais] [varchar](4) NULL,\n [Emit_xPais] [varchar](100) NULL,\n [Emit_IE] [varchar](14) NULL,\n [Dest_CNPJ] [varchar](14) NULL,\n [Dest_CPF] [varchar](11) NULL,\n [Dest_xNome] [varchar](200) NULL,\n [Dest_xFant] [varchar](200) NULL,\n [Dest_UF] [varchar](2) NULL,\n [Dest_cPais] [varchar](4) NULL,\n [Dest_xPais] [varchar](100) NULL,\n [Dest_IE] [varchar](14) NULL,\n [nItem] [bigint] NULL,\n [cProd] [varchar](200) NULL,\n [xProd] [varchar](500) NULL,\n [NCM] [varchar](8) NULL,\n [CFOP] [varchar](4) NULL,\n [qCom] [numeric](24, 12) NULL,\n [uCom] [varchar](6) NULL,\n [vProd] [numeric](24, 12) NULL,\n [vFrete] [numeric](24, 12) NULL,\n [vSeg] [numeric](24, 12) NULL,\n [vDesc] [numeric](24, 12) NULL,\n [vOutro] [numeric](24, 12) NULL,\n [nDI] [varchar](12) NULL,\n [dDI] [date] NULL,\n [xPed] [varchar](15) NULL,\n [orig] [varchar](1) NULL,\n [CST_ICMS] [varchar](2) NULL,\n [vICMS] [numeric](24, 12) NULL,\n [vICMSST] [numeric](24, 12) NULL,\n [vICMSDeson] [numeric](24, 12) NULL, \n [CST_IPI] [varchar](2) NULL,\n [vIPI] [numeric](24, 12) NULL,\n [vBC_II] [numeric](24, 12) NULL,\n [vDespAdu] [numeric](24,12) NULL,\n [vII] [numeric](24, 12) NULL,\n [CST_PIS] [varchar](2) NULL,\n [vBC_PIS] [numeric](24, 12) NULL,\n [pPIS] [numeric](24, 12) NULL,\n [vPIS] [numeric](24, 12) NULL,\n [CST_COFINS] [varchar](2) NULL,\n [vBC_COFINS] [numeric](24, 12) NULL,\n [pCOFINS] [numeric](24, 12) NULL,\n [vCOFINS] [numeric](24, 12) NULL,\n [vFCPUFDest] [numeric](24, 12) NULL,\n [vICMSUFDest] [numeric](24, 12) NULL,\n [vICMSUFRemet] [numeric](24, 12) NULL,\n [vISSQN] [numeric](24, 12) NULL,\n [infCpl] [varchar](5000) NULL,\n [infAdFisco] [varchar](5000) NULL\n )\n ''')\n else:\n with engine.connect() as con2:\n if not engine.dialect.has_table(engine,self.tnome):\n con2.execute('''\n CREATE TABLE [dbo].['''+self.tnome+'''](\n [index] [bigint] NOT NULL IDENTITY(1,1) PRIMARY KEY,\n [Arq_ID] [bigint] NULL,\n [Itm_ID] [bigint] NULL,\n [chNFe] [varchar](44) NULL,\n [cUF] [varchar](2) NULL,\n [natOP] [varchar](max) NULL,\n [serie] [int] NULL,\n [nNF] [int] NULL,\n [dhEmi] [date] NULL,\n [dhSaiEnt] [date] NULL,\n [dVenc] [date] NULL,\n [tpNF] [int] NULL,\n [refNFe] [varchar](44) NULL,\n [finNFe] [int] NULL,\n [Emit_CNPJ] [varchar](14) NULL,\n [Emit_xNome] [varchar](200) NULL,\n [Emit_xFant] [varchar](200) NULL,\n [Emit_UF] [varchar](2) NULL,\n [Emit_cPais] [varchar](4) NULL,\n [Emit_xPais] [varchar](100) NULL,\n [Emit_IE] [varchar](14) NULL,\n [Dest_CNPJ] [varchar](14) NULL,\n [Dest_CPF] [varchar](11) NULL,\n [Dest_xNome] [varchar](200) NULL,\n [Dest_xFant] [varchar](200) NULL,\n [Dest_UF] [varchar](2) NULL,\n [Dest_cPais] [varchar](4) NULL,\n [Dest_xPais] [varchar](100) NULL,\n [Dest_IE] [varchar](14) NULL,\n [nItem] [bigint] NULL,\n [cProd] [varchar](200) NULL,\n [xProd] [varchar](500) NULL,\n [NCM] [varchar](8) NULL,\n [CFOP] [varchar](4) NULL,\n [qCom] [numeric](24, 12) NULL,\n [uCom] [varchar](6) NULL,\n [vProd] [numeric](24, 12) NULL,\n [vFrete] [numeric](24, 12) NULL,\n [vSeg] [numeric](24, 12) NULL,\n [vDesc] [numeric](24, 12) NULL,\n [vOutro] [numeric](24, 12) NULL,\n [nDI] [varchar](12) NULL,\n [dDI] [date] NULL,\n [xPed] [varchar](15) NULL,\n [orig] [varchar](1) NULL,\n [CST_ICMS] [varchar](2) NULL,\n [vICMS] [numeric](24, 12) NULL,\n [vICMSST] [numeric](24, 12) NULL,\n [vICMSDeson] [numeric](24, 12) NULL, \n [CST_IPI] [varchar](2) NULL,\n [vIPI] [numeric](24, 12) NULL,\n [vBC_II] [numeric](24, 12) NULL,\n 
[vDespAdu] [numeric](24,12) NULL,\n [vII] [numeric](24, 12) NULL,\n [CST_PIS] [varchar](2) NULL,\n [vBC_PIS] [numeric](24, 12) NULL,\n [pPIS] [numeric](24, 12) NULL,\n [vPIS] [numeric](24, 12) NULL,\n [CST_COFINS] [varchar](2) NULL,\n [vBC_COFINS] [numeric](24, 12) NULL,\n [pCOFINS] [numeric](24, 12) NULL,\n [vCOFINS] [numeric](24, 12) NULL,\n [vFCPUFDest] [numeric](24, 12) NULL,\n [vICMSUFDest] [numeric](24, 12) NULL,\n [vICMSUFRemet] [numeric](24, 12) NULL,\n [vISSQN] [numeric](24, 12) NULL,\n [infCpl] [varchar](5000) NULL,\n [infAdFisco] [varchar](5000) NULL\n )\n ''')\n \n #Lê os XML da pasta e tabula as informações em uma lista de dicionários\n def ler(self,diretorio,narquivos):\n global all_xml\n global canc_xml\n global venc_xml\n all_xml = []\n canc_xml = []\n venc_xml = []\n lidos = 0\n self.barprog['value']=0\n self.barprog.update()\n self.barprog['maximum'] = arquivos\n for file in os.listdir(diretorio):\n if file.endswith(b'.xml') or file.endswith(b'.XML'):\n lidos = lidos + 1 \n self.barprog['value']=lidos\n self.barprog.update()\n #print(file)\n xroot = et.parse(os.path.join(diretorio, file),parser=et.XMLParser(encoding=\"iso-8859-5\"))\n xtree = xroot.getroot()\n self.statbar.set(str(lidos)+' / '+str(narquivos)+' arquivos lidos')\n if xtree.tag == '{http://www.portalfiscal.inf.br/nfe}nfeProc':\n nItem = 0\n chNFe = xtree.find('aux:NFe/aux:infNFe',self.ns).attrib['Id'][3:]\n for ides in xroot.findall('aux:NFe/aux:infNFe/aux:ide',self.ns):\n cUF = ides.find('aux:cUF',self.ns).text\n natOp = ides.find('aux:natOp',self.ns).text\n serie = ides.find('aux:serie',self.ns).text\n nNF = ides.find('aux:nNF',self.ns).text\n dhEmi_t = ides.find('aux:dhEmi',self.ns)\n if dhEmi_t is not None:\n dhEmi = ides.find('aux:dhEmi',self.ns).text[0:10]\n else: \n dhEmi = ides.find('aux:dEmi',self.ns).text[0:10]\n dhSaiEnt_t = ides.find('aux:dhSaiEnt',self.ns)\n dSaiEnt_t = ides.find('aux:dSaiEnt',self.ns) \n if dhSaiEnt_t is not None:\n dhSaiEnt = ides.find('aux:dhSaiEnt',self.ns).text[0:10]\n elif dSaiEnt_t is not None: \n dhSaiEnt = ides.find('aux:dSaiEnt',self.ns).text[0:10]\n else:\n dhSaiEnt = None \n tpNF = ides.find('aux:tpNF',self.ns).text\n try:\n refNFe = ides.find('aux:refNFe',self.ns).text\n except: \n refNFe = None \n finNFe = ides.find('aux:finNFe',self.ns).text\n for emits in xroot.findall('aux:NFe/aux:infNFe/aux:emit',self.ns):\n try:\n Emit_CNPJ = emits.find('aux:CNPJ',self.ns).text\n except: \n Emit_CNPJ = None\n Emit_xNome = emits.find('aux:xNome',self.ns).text \n try:\n Emit_xFant = emits.find('aux:xFant',self.ns).text\n except: \n Emit_xFant = None\n try:\n Emit_xPais = emits.find('.*/aux:xPais',self.ns).text\n except:\n Emit_xPais = None \n try:\n Emit_cPais = emits.find('.*/aux:cPais',self.ns).text\n except:\n Emit_cPais = None \n Emit_UF = emits.find('.*/aux:UF',self.ns).text\n try:\n Emit_IE = emits.find('aux:IE',self.ns).text\n except: \n Emit_IE = None \n for dests in xroot.findall('aux:NFe/aux:infNFe/aux:dest',self.ns):\n try:\n Dest_CNPJ = dests.find('aux:CNPJ',self.ns).text\n except: \n Dest_CNPJ = None\n try:\n Dest_CPF = dests.find('aux:CPF',self.ns).text\n except: \n Dest_CPF = None\n Dest_xNome = dests.find('aux:xNome',self.ns).text \n try:\n Dest_xFant = dests.find('aux:xFant',self.ns).text\n except: \n Dest_xFant = None\n try:\n Dest_xPais = dests.find('.*/aux:xPais',self.ns).text\n except:\n Dest_xPais = None \n try:\n Dest_cPais = dests.find('.*/aux:cPais',self.ns).text\n except:\n Dest_cPais = None \n Dest_UF = dests.find('.*/aux:UF',self.ns).text\n try:\n 
Dest_IE = dests.find('aux:IE',self.ns).text\n except: \n Dest_IE = None\n for infadics in xroot.findall('aux:NFe/aux:infNFe/aux:infAdic',self.ns):\n try:\n infCpl = infadics.find('aux:infCpl',self.ns).text\n except: \n infCpl = None\n try:\n infAdFisco = infadics.find('aux:infAdFisco',self.ns).text\n except: \n infAdFisco = None\n for itens in xroot.findall('aux:NFe/aux:infNFe/aux:det',self.ns): \n nItem = nItem + 1 \n cProd = itens.find('.*/aux:cProd',self.ns).text \n xProd = itens.find('.*/aux:xProd',self.ns).text \n NCM = itens.find('.*/aux:NCM',self.ns).text\n CFOP = itens.find('.*/aux:CFOP',self.ns).text\n qCom = itens.find('.*/aux:qCom',self.ns).text\n uCom = itens.find('.*/aux:uCom',self.ns).text\n vProd = itens.find('.*/aux:vProd',self.ns).text\n try:\n vFrete = itens.find('.*/aux:vFrete',self.ns).text\n except: \n vFrete = 0.00\n try:\n vSeg = itens.find('.*/aux:vSeg',self.ns).text\n except: \n vSeg = 0.00\n try:\n vDesc = itens.find('.*/aux:vDesc',self.ns).text\n except: \n vDesc = 0.00\n try:\n vOutro = itens.find('.*/aux:vOutro',self.ns).text\n except: \n vOutro = 0.00\n try:\n nDI =itens.find('.*//aux:nDI',self.ns).text\n except: \n nDI = None \n try:\n dDI = itens.find('.*//aux:dDI',self.ns).text\n except: \n dDI = None \n try:\n xPed = itens.find('.*//aux:xPed',self.ns).text\n except:\n xPed = None \n try:\n orig = itens.find('.*//aux:ICMS//aux:orig',self.ns).text\n except:\n orig = None\n try:\n CST_ICMS = itens.find('.*//aux:ICMS//aux:CST',self.ns).text\n except:\n CST_ICMS = None \n try:\n vICMS = itens.find('.*//aux:ICMS//aux:vICMS',self.ns).text\n except: \n vICMS = 0.00\n try:\n vICMSST = itens.find('.*//aux:ICMS//aux:vICMSST',self.ns).text\n except: \n vICMSST = 0.00\n try: \n vICMSDeson = itens.find('.*//aux:ICMS//aux:vICMSDeson',self.ns).text\n except:\n vICMSDeson = 0.00 \n try:\n CST_IPI = itens.find('.*//aux:IPI//aux:CST',self.ns).text\n except:\n CST_IPI = None \n try:\n vIPI = itens.find('.*//aux:IPI//aux:vIPI',self.ns).text\n except: \n vIPI = 0.00 \n try:\n vBC_II = itens.find('.*//aux:II//aux:vBC',self.ns).text\n except: \n vBC_II = 0.00\n try:\n vDespAdu = itens.find('.*//aux:II//aux:vDespAdu',self.ns).text\n except: \n vDespAdu = 0.00\n try:\n vII = itens.find('.*//aux:II//aux:vII',self.ns).text\n except: \n vII = 0.00\n try:\n CST_PIS = itens.find('.*//aux:PIS//aux:CST',self.ns).text\n except: \n CST_PIS = None\n try:\n vBC_PIS = itens.find('.*//aux:PIS//aux:vBC',self.ns).text\n except: \n vBC_PIS = 0.00\n try:\n pPIS = itens.find('.*//aux:PIS//aux:pPIS',self.ns).text\n except: \n pPIS = 0.00 \n try:\n vPIS = itens.find('.*//aux:PIS//aux:vPIS',self.ns).text\n except: \n vPIS = 0.00\n try:\n CST_COFINS = itens.find('.*//aux:COFINS//aux:CST',self.ns).text\n except: \n CST_COFINS = None\n try:\n vBC_COFINS = itens.find('.*//aux:COFINS//aux:vBC',self.ns).text\n except: \n vBC_COFINS = 0.00\n try:\n pCOFINS = itens.find('.*//aux:COFINS//aux:pCOFINS',self.ns).text\n except: \n pCOFINS = 0.00 \n try:\n vCOFINS = itens.find('.*//aux:COFINS//aux:vCOFINS',self.ns).text\n except: \n vCOFINS = 0.00 \n try:\n vFCPUFDest = itens.find('.*//aux:ICMSUFDest//aux:vFCPUFDest',self.ns).text\n except: \n vFCPUFDest = 0.00\n try:\n vICMSUFDest = itens.find('.*//aux:ICMSUFDest//aux:vICMSUFDest',self.ns).text\n except: \n vICMSUFDest = 0.00\n try:\n vICMSUFRemet = itens.find('.*//aux:ICMSUFDest//aux:vICMSUFRemet',self.ns).text\n except: \n vICMSUFRemet = 0.00\n try:\n vISSQN = itens.find('.*//aux:ISSQN//aux:vISSQN',self.ns).text\n except: \n vISSQN = 0.00 \n\n leitura = 
{'Arq_ID':lidos,'Itm_ID':nItem,'chNFe':chNFe,'cUF':cUF,'natOP':natOp,'serie':serie,'nNF':nNF,'dhEmi':dhEmi,'dhSaiEnt':dhSaiEnt,\n 'tpNF':tpNF,'refNFe':refNFe,'finNFe':finNFe,'Emit_CNPJ':Emit_CNPJ,'Emit_xNome':Emit_xNome,'Emit_xFant':Emit_xFant,\n 'Emit_UF':Emit_UF,'Emit_cPais':Emit_cPais,'Emit_xPais':Emit_xPais,'Emit_IE':Emit_IE,'Dest_CNPJ':Dest_CNPJ,\n 'Dest_CPF':Dest_CPF,'Dest_xNome':Dest_xNome,'Dest_xFant':Dest_xFant, 'Dest_UF':Dest_UF,'Dest_cPais':Dest_cPais,\n 'Dest_xPais':Dest_xPais,'Dest_IE':Dest_IE,'nItem':nItem,'cProd':cProd,'xProd':xProd,'NCM':NCM,'CFOP':CFOP,'qCom':qCom,\n 'uCom':uCom,'vProd':vProd,'vFrete':vFrete,'vSeg':vSeg,'vDesc':vDesc,'vOutro':vOutro,'nDI':nDI,'dDI':dDI,'xPed':xPed,\n 'orig':orig,'CST_ICMS':CST_ICMS,'vICMS':vICMS,'vICMSST':vICMSST,'vICMSDeson':vICMSDeson,'CST_IPI':CST_IPI,'vIPI':vIPI,'vBC_II':vBC_II,\n 'vDespAdu':vDespAdu,'vII':vII,'CST_PIS':CST_PIS,'vBC_PIS':vBC_PIS,'pPIS':pPIS,'vPIS':vPIS,'CST_COFINS':CST_COFINS,\n 'vBC_COFINS':vBC_COFINS,'pCOFINS':pCOFINS,'vCOFINS':vCOFINS,'vFCPUFDest':vFCPUFDest,'vICMSUFDest':vICMSUFDest,\n 'vICMSUFRemet':vICMSUFRemet,'vISSQN':vISSQN,'infCpl':infCpl,'infAdFisco':infAdFisco} \n \n all_xml.append(leitura) \n \n for dups in xroot.findall('aux:NFe/aux:infNFe/aux:cobr/aux:dup',self.ns):\n try:\n dVenc = dups.find('aux:dVenc',self.ns).text\n except:\n dVenc = None\n try: \n vDup = dups.find('aux:vDup',self.ns).text\n except:\n vDup = None\n\n vencimento = {'chNFe_venc':chNFe,'dhEmi':dhEmi,'dVenc':dVenc,'vDup':vDup}\n venc_xml.append(vencimento)\n \n\n \n elif xtree.tag == '{http://www.portalfiscal.inf.br/nfe}procEventoNFe':\n descevento_t = xtree.find('.*//aux:descEvento',self.ns)\n if descevento_t is not None:\n if xtree.find('.*//aux:descEvento',self.ns).text == 'Cancelamento':\n canc_chNFe = xtree.find('.*//aux:chNFe',self.ns).text\n canceladas = {'canc_chNFe':canc_chNFe}\n canc_xml.append(canceladas)\n else:\n continue\n else:\n continue \n else:\n continue\n else:\n continue\n \n #Define chunks para insersão dos dados no servidor SQL\n def chunker(self,seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n \n #Exporta os dados para o servidor SQL, na quantidade de linhas de definidas no chunker por vez\n def export(self):\n global df_all_xml\n chunksize = 1000\n self.barprog['value']=0\n self.barprog.update()\n self.barprog['maximum'] = len(df_all_xml)\n for i,call_xml in enumerate(self.chunker(df_all_xml, chunksize)):\n self.statbar.set(str(i*chunksize)+' linhas inseridas')\n call_xml.to_sql(name=self.tnome, con=engine, if_exists='append', index=False,\n dtype={'chNFe':sqlalchemy.types.VARCHAR(length=44),\n 'cUF':sqlalchemy.types.VARCHAR(length=2),\n 'natOP':sqlalchemy.types.VARCHAR(),\n 'serie':sqlalchemy.types.INTEGER(),\n 'nNF':sqlalchemy.types.INTEGER(),\n 'dhEmi':sqlalchemy.types.Date(),\n 'dhSaiEnt':sqlalchemy.types.Date(),\n 'dVenc':sqlalchemy.types.Date(),\n 'tpNF':sqlalchemy.types.INTEGER(),\n 'refNFe':sqlalchemy.types.VARCHAR(length=44),\n 'finNFe':sqlalchemy.types.INTEGER(),\n 'Emit_CNPJ':sqlalchemy.types.VARCHAR(length=14),\n 'Emit_xNome':sqlalchemy.types.VARCHAR(length=200),\n 'Emit_xFant':sqlalchemy.types.VARCHAR(length=200),\n 'Emit_UF':sqlalchemy.types.VARCHAR(length=2),\n 'Emit_cPais':sqlalchemy.types.VARCHAR(length=4),\n 'Emit_xPais':sqlalchemy.types.VARCHAR(length=100),\n 'Emit_IE':sqlalchemy.types.VARCHAR(length=14),\n 'Dest_CNPJ':sqlalchemy.types.VARCHAR(length=14),\n 'Dest_CPF':sqlalchemy.types.VARCHAR(length=11),\n 'Dest_xNome':sqlalchemy.types.VARCHAR(length=200),\n 
'Dest_xFant':sqlalchemy.types.VARCHAR(length=200),\n 'Dest_UF':sqlalchemy.types.VARCHAR(length=2),\n 'Dest_cPais':sqlalchemy.types.VARCHAR(length=4),\n 'Dest_xPais':sqlalchemy.types.VARCHAR(length=100),\n 'Dest_IE':sqlalchemy.types.VARCHAR(length=14),\n 'cProd':sqlalchemy.types.VARCHAR(length=200),\n 'xProd':sqlalchemy.types.VARCHAR(length=500),\n 'NCM':sqlalchemy.types.VARCHAR(length=8),\n 'CFOP':sqlalchemy.types.VARCHAR(length=4),\n 'qCom':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'uCom':sqlalchemy.types.VARCHAR(length=6),\n 'vProd':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vFrete':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vSeg':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vDesc':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vOutro':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'nDI':sqlalchemy.types.VARCHAR(length=12),\n 'dDI':sqlalchemy.types.Date(),\n 'xPed':sqlalchemy.types.VARCHAR(length=15),\n 'orig':sqlalchemy.types.VARCHAR(length=1),\n 'CST_ICMS':sqlalchemy.types.VARCHAR(length=2),\n 'vICMS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vICMSST':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vICMSDeson':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'CST_IPI':sqlalchemy.types.VARCHAR(length=2),\n 'vIPI':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vBC_II':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vDespAdu_II':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vII':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'CST_PIS':sqlalchemy.types.VARCHAR(length=2),\n 'vBC_PIS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'pPIS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vPIS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'CST_COFINS':sqlalchemy.types.VARCHAR(length=2),\n 'vBC_COFINS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'pCOFINS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vCOFINS':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vFCPUFDest':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vICMSUFDest':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vICMSUFRemet':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'vISSQN':sqlalchemy.types.NUMERIC(precision=24, scale=12, asdecimal=True),\n 'infCpl':sqlalchemy.types.VARCHAR(length=5000),\n 'infAdFisco':sqlalchemy.types.VARCHAR(length=5000)})\n self.barprog['value']= i*chunksize\n self.barprog.update()\n self.barprog['value']=len(df_all_xml)\n self.barprog.update()\n \n#TK\nroot = tk.Tk()\njanela = Leitor(root)\nroot.mainloop()\n\n# =============================================================================\n"
] |
[
[
"pandas.merge",
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.to_timedelta",
"pandas.to_numeric"
]
] |
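The export() method above avoids one oversized to_sql() call by slicing the DataFrame through a small generator. The same pattern in isolation, as a standalone sketch with synthetic data:

    import pandas as pd

    def chunker(seq, size):
        # Same slicing generator Leitor.export() uses to batch its inserts.
        return (seq[pos:pos + size] for pos in range(0, len(seq), size))

    df = pd.DataFrame({'x': range(2500)})
    for i, chunk in enumerate(chunker(df, 1000)):
        print(i, len(chunk))  # 0 1000, 1 1000, 2 500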
The-Platypus/ESINet
|
[
"b9b4e8ed8c6a0fa6d082e303014859a054233fd4"
] |
[
"build/lib/ESINet/simulations/simulations.py"
] |
[
"import mne\nimport numpy as np\nimport random\nimport os\nfrom copy import deepcopy\nimport mne\nimport pickle as pkl\nfrom tqdm import tqdm\nimport colorednoise as cn\nimport pickle as pkl\nfrom joblib import Parallel, delayed\nfrom ..util import *\n\ndef run_simulations(pth_fwd, n_simulations=10000, n_sources=(1, 5), extents=(2, 3), \n amplitudes=(5, 10), shape='gaussian', durOfTrial=1, sampleFreq=100, \n regionGrowing=True, n_jobs=-1, return_raw_data=False, return_single_epoch=True):\n ''' A wrapper function for the core function \"simulate_source\" which\n calculates simulations multiple times. \n Parameters:\n -----------\n pth_fwd : str, path of the forward model folder containing forward model\n files n_simulations : int, number of simulations to perform. 100,000 perform\n great, 10,000 are fine for testing. \n parallel : bool, perform simulations in parallel (can be faster) or sequentially \n n_jobs : int, number of jobs to run in parallel, -1 utilizes all cores\n return_raw_data : bool, if True the function returns a list of \n mne.SourceEstimate objects, otherwise it returns raw data\n\n <for the rest see function \"simulate_source\"> \n \n Parameters:\n -----------\n sources : list, list of simulations containing either mne.SourceEstimate \n objects or raw arrays (see <return_raw_data> argument)\n '''\n\n if not pth_fwd.endswith('/'):\n pth_fwd += '/'\n # Load neighbor matrix\n fwd_file = os.listdir(pth_fwd)[np.where(['-fwd.fif' in list_of_files \n for list_of_files in os.listdir(pth_fwd)])[0][0]]\n\n fwd = mne.read_forward_solution(pth_fwd + fwd_file, verbose=0)\n tris_lr = [fwd['src'][0]['use_tris'], fwd['src'][1]['use_tris']]\n neighbors = get_triangle_neighbors(tris_lr)\n # Load dipole positions in\n with open(pth_fwd + '/pos.pkl', 'rb') as file: \n pos = pkl.load(file)[0]\n \n # perform simulations\n settings = {'n_sources':n_sources,\n 'extents': extents, \n 'amplitudes': amplitudes,\n 'shape': shape, \n 'durOfTrial': durOfTrial,\n 'sampleFreq': sampleFreq,\n 'regionGrowing': regionGrowing\n }\n\n print(f'\\nRun {n_simulations} simulations...')\n\n sources = np.stack(Parallel(n_jobs=n_jobs, backend='loky')(\n delayed(simulate_source)(pos, neighbors, **settings) \n for i in tqdm(range(n_simulations))))\n\n if not return_raw_data:\n source_vectors = np.stack([source[0] for source in sources], axis=0)\n has_temporal_dimension = len(np.squeeze(source_vectors).shape) == 3\n if return_single_epoch and not has_temporal_dimension:\n print(f'\\nConvert simulations to a single instance of mne.SourceEstimate...')\n sources = source_to_sourceEstimate(source_vectors, pth_fwd, sfreq=sampleFreq, simulationInfo=sources[0][1]) \n else:\n print(f'\\nConvert simulations to instances of mne.SourceEstimate...')\n sources = Parallel(n_jobs=n_jobs, backend='loky')(\n delayed(source_to_sourceEstimate)(source[0], pth_fwd, sfreq=sampleFreq, simulationInfo=source[1]) \n for source in tqdm(sources))\n else:\n sources = np.stack([sources[i][0] for i in range(n_simulations)], axis=0)\n \n return sources\n\ndef simulate_source(pos, neighbors, n_sources=(1, 5), extents=(2, 3), amplitudes=(5, 10),\n shape='gaussian', durOfTrial=1, sampleFreq=100, regionGrowing=True):\n ''' Returns a vector containing the dipole currents. Requires only a dipole \n position list and the simulation settings.\n\n Parameters:\n -----------\n pos : numpy.ndarray, (n_dipoles x 3), list of dipole positions.\n n_sources : int/tuple/list, number of sources. 
Can be a single number or a \n list of two numbers specifying a range.\n regionGrowing : bool, whether to use region growing. If True, please supply\n also the neighbors to the settings.\n neighbors : list, a list containing all the (triangle-) neighbors for each \n dipole. Can be calculated using \"get_triangle_neighbors\"\n extents : int/float/tuple/list, size of sources. If regionGrowing==True this \n specifies the neighborhood order (see Grova et al., 2006), otherwise the diameter in mm. Can be a single number or a \n list of two numbers specifying a range.\n amplitudes : int/float/tuple/list, the current of the source in nAm\n shape : str, How the amplitudes evolve over space. Can be 'gaussian' or 'flat' (i.e. uniform).\n durOfTrial : int/float, specifies the duration of a trial.\n sampleFreq : int, specifies the sample frequency of the data.\n Return:\n -------\n source : numpy.ndarray, (n_dipoles x n_timepoints), the simulated source signal\n simSettings : dict, specifications about the source.\n Grova, C., Daunizeau, J., Lina, J. M., Bénar, C. G., Benali, H., & Gotman, J. (2006). Evaluation of EEG localization methods using realistic simulations of interictal spikes. Neuroimage, 29(3), 734-753.\n '''\n \n # Handle input\n\n # Amplitudes come in nAm\n if isinstance(amplitudes, (list, tuple)):\n amplitudes = [amp* 1e-9 for amp in amplitudes] \n else:\n amplitudes *= 1e-9\n\n if isinstance(extents, (list, tuple)):\n if np.max(extents) > 15 and regionGrowing:\n print(f'WARNING: When region growing is selected, extent refers to the neighborhood order. Your order goes up to {np.max(extents)}, but should be max at 10.')\n return\n else:\n if extents > 15 and regionGrowing:\n print(f'WARNING: When region growing is selected, extent refers to the neighborhood order. 
Your order is set to {np.max(extents)}, but should be max at 10.')\n            return\n\n\n    if durOfTrial > 0:\n        if durOfTrial < 0.5 :\n            print(f'durOfTrial should be either 0 or at least 0.5 seconds!')\n            return\n        \n        signalLen = int(sampleFreq*durOfTrial)\n        pulselen = sampleFreq/10\n        pulse = get_pulse(pulselen)\n        signal = np.zeros((signalLen))\n        start = int(np.floor((signalLen - pulselen) / 2))\n        end = int(np.ceil((signalLen - pulselen) / 2))\n        signal[start:-end] = pulse\n        signal /= np.max(signal)\n    else: # else it's a single instance\n        sampleFreq = 0\n        signal = 1\n    \n    ###########################################\n    # Select ranges and prepare some variables:\n    sourceMask = np.zeros((pos.shape[0]))\n    # If n_sources is a range:\n    if isinstance(n_sources, (tuple, list)):\n        n_sources = random.randrange(*n_sources)\n    \n    if isinstance(extents, (tuple, list)):\n        extents = [random.randrange(*extents) for _ in range(n_sources)]\n    else:\n        extents = [extents for _ in range(n_sources)]\n\n    if isinstance(amplitudes, (tuple, list)):\n        amplitudes = [random.uniform(*amplitudes) for _ in range(n_sources)]\n    else:\n        amplitudes = [amplitudes for _ in range(n_sources)]\n    \n    src_centers = np.random.choice(np.arange(pos.shape[0]), \\\n        n_sources, replace=False)\n\n    \n    source = np.zeros((pos.shape[0]))\n    \n    ##############################################\n    \n    for i, src_center in enumerate(src_centers):\n        # Smoothing and amplitude assignment\n        if regionGrowing:\n            d = get_n_order_indices(extents[i], src_center, neighbors)\n            dists = np.empty((pos.shape[0]))\n            dists[:] = np.inf\n            dists[d] = np.sqrt(np.sum((pos - pos[src_center, :])**2, axis=1))[d]\n        else:\n            dists = np.sqrt(np.sum((pos - pos[src_center, :])**2, axis=1))\n            d = np.where(dists<extents[i]/2)[0]\n\n\n        if shape == 'gaussian':\n            # sd = extents[i]/2 # <-This does not work when extents can also be neighborhood orders\n            sd = np.max(dists[d]) / 2 # <- works better\n            source[:] += gaussian(dists, 0, sd) * amplitudes[i]\n        elif shape == 'flat':\n            source[d] += amplitudes[i]\n        else:\n            raise ValueError(\"shape must be of type >string< and be either >gaussian< or >flat<.\")\n        sourceMask[d] = 1\n\n    # if durOfTrial > 0:\n    n = np.clip(int(sampleFreq * durOfTrial), a_min=1, a_max=None)\n    sourceOverTime = repeat_newcol(source, n)\n    source = np.squeeze(sourceOverTime * signal)\n    if len(source.shape) == 1:\n        source = np.expand_dims(source, axis=1)\n    \n    # Prepare informative dictionary that entails all infos on how the simulation was created.\n    simSettings = dict(scr_center_indices=src_centers, amplitudes=amplitudes, extents=extents, \n        shape=shape, sourceMask=sourceMask, regionGrowing=regionGrowing, durOfTrial=durOfTrial,\n        sampleFreq=sampleFreq)\n\n    return source, simSettings\n\ndef get_pulse(x):\n    ''' Returns a pulse of length x'''\n    freq = (1/x) / 2\n    time = np.arange(x)\n\n    signal = np.sin(2*np.pi*freq*time)\n    return signal\n\ndef repeat_newcol(x, n):\n    ''' Repeat a list/numpy.ndarray x in n columns.'''\n    out = np.zeros((len(x), n))\n    for i in range(n):\n        out[:, i] = x\n    return np.squeeze(out)\n\ndef get_n_order_indices(order, pick_idx, neighbors):\n    ''' Iteratively performs region growing by selecting neighbors of \n    neighbors for <order> iterations.\n    '''\n    if order == 0:\n        return pick_idx\n    flatten = lambda t: [item for sublist in t for item in sublist]\n\n    current_indices = [pick_idx]\n    for cnt in range(order):\n        # current_indices = list(np.array( current_indices ).flatten())\n        new_indices = [neighbors[i] for i in current_indices]\n        new_indices = flatten( new_indices )\n        
current_indices.extend(new_indices)\n return current_indices\n\ndef gaussian(x, mu, sig):\n return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))\n\ndef get_triangle_neighbors(tris_lr):\n if not np.all(np.unique(tris_lr[0]) == np.arange(len(np.unique(tris_lr[0])))):\n for hem in range(2):\n old_indices = np.sort(np.unique(tris_lr[hem]))\n new_indices = np.arange(len(old_indices))\n for old_idx, new_idx in zip(old_indices, new_indices):\n tris_lr[hem][tris_lr[hem] == old_idx] = new_idx\n\n print('indices were weird - fixed them.')\n numberOfDipoles = len(np.unique(tris_lr[0])) + len(np.unique(tris_lr[1]))\n neighbors = [list() for _ in range(numberOfDipoles)]\n # correct right-hemisphere triangles\n tris_lr_adjusted = deepcopy(tris_lr)\n # the right hemisphere indices start at zero, we need to offset them to start where left hemisphere indices end.\n tris_lr_adjusted[1] += int(numberOfDipoles/2)\n # left and right hemisphere\n for hem in range(2):\n for idx in range(numberOfDipoles):\n # Find the indices of the triangles where our current dipole idx is part of\n trianglesOfIndex = tris_lr_adjusted[hem][np.where(tris_lr_adjusted[hem] == idx)[0], :]\n for tri in trianglesOfIndex:\n neighbors[idx].extend(tri)\n # Remove self-index (otherwise neighbors[idx] is its own neighbor)\n neighbors[idx] = list(filter(lambda a: a != idx, neighbors[idx]))\n # Remove duplicates\n neighbors[idx] = list(np.unique(neighbors[idx]))\n # print(f'idx {idx} found in triangles: {neighbors[idx]}') \n return neighbors\n\ndef add_noise(x, snr, beta=0):\n x = np.squeeze(np.array(x))\n\n if len(x.shape) == 1:\n n_samples = np.clip(len(x), a_min=2, a_max=None)\n noise = cn.powerlaw_psd_gaussian(beta, n_samples)[:len(x)]\n else:\n n_samples = x.shape\n noise = cn.powerlaw_psd_gaussian(beta, n_samples)\n \n if len(noise) == 1:\n rms_noise = noise\n else:\n noise -= np.mean(noise)\n rms_noise = rms(noise)\n\n if len(x) == 1:\n rms_x = x\n else:\n rms_x = rms(x)\n \n rms_noise = rms(noise)\n noise_scaler = rms_x / (rms_noise*snr)\n return x + noise*noise_scaler\n\ndef rms(x):\n return np.sqrt(np.mean(np.square(x)))\n\n\ndef create_eeg_helper(eeg_sample, n_trials, snr, beta):\n if type(snr) == tuple or type(snr) == list:\n snr = random.uniform(*snr)\n # eeg_sample = np.repeat(eeg_sample, n_trials, axis=0)\n eeg_sample = np.repeat(np.expand_dims(eeg_sample, 0), n_trials, axis=0)\n\n # noise_trial = np.stack([add_noise(eeg_sample, snr, beta) for trial in range(n_trials)], axis=0)\n noise_trial = add_noise(eeg_sample, snr, beta)\n \n return noise_trial\n\n\ndef create_eeg(sourceEstimates, pth_fwd, snr=2, n_trials=20, beta=1, n_jobs=-1,\n return_raw_data=False, return_single_epoch=True):\n ''' Create EEG of specified number of trials based on sources and some SNR.\n Parameters:\n -----------\n sourceEstimates : list, list containing mne.SourceEstimate objects\n pth_fwd : str, path to the forward model files\n snr : tuple/list/float, desired signal to noise ratio within individual \n trials. Can be a list or tuple of two floats specifying a range.\n n_trials : int, number of simulated trials\n beta : float, determines the frequency spectrum of the noise added \n to the signal: power = (1/f)^beta. \n 0 will yield white noise, \n 1 will yield pink noise (1/f spectrum)\n n_jobs : int, Number of jobs to run in parallel. 
\n -1 will utilize all cores.\n return_raw_data : bool, if True the function returns a list of \n mne.SourceEstimate objects, otherwise it returns raw data\n Return:\n -------\n epochs : list, list of either mne.Epochs objects or list of raw EEG \n data (see argument <return_raw_data> to change output).\n '''\n # Unpack the source data from the SourceEstimate objects\n if type(sourceEstimates) == mne.source_estimate.SourceEstimate:\n sources = np.transpose(sourceEstimates.data)\n sfreq = sourceEstimates.simulationInfo['sampleFreq']\n n_timepoints = 1\n elif type(sourceEstimates) == list:\n sources = np.stack([se.data for se in sourceEstimates], axis=0)\n sfreq = sourceEstimates[0].simulationInfo['sampleFreq']\n n_timepoints = sources.shape[-1]\n elif type(sourceEstimates) == np.ndarray:\n sources = np.squeeze(sourceEstimates)\n if len(sources.shape) == 2:\n sources = np.expand_dims(sources, axis=-1)\n sfreq = 1\n print(f'sources.shape={sources.shape}')\n n_timepoints = sources.shape[-1]\n else:\n msg = f'sourceEstimates must be of type <list> or <mne.source_estimate.SourceEstimate> but is of type <{type(sourceEstimates)}>'\n raise ValueError(msg)\n\n # Load some forward model objects\n leadfield = load_leadfield(pth_fwd)\n info = load_info(pth_fwd)\n info['sfreq'] = sfreq\n \n n_samples = len(sources)\n n_elec = leadfield.shape[0]\n \n\n eeg_clean = np.stack([np.matmul(leadfield, y) for y in sources], axis=0)\n\n eeg_trials_noisy = np.zeros((n_samples, n_trials, n_elec, n_timepoints))\n\n print(f'\\nCreate EEG trials with noise...')\n eeg_trials_noisy = np.stack(Parallel(n_jobs=n_jobs, backend='loky')\n (delayed(create_eeg_helper)(eeg_clean[sample], n_trials, snr, beta) \n for sample in tqdm(range(n_samples))), axis=0)\n \n if n_trials == 1 and len(eeg_trials_noisy.shape) == 2:\n # Add empty dimension to contain the single trial\n eeg_trials_noisy = np.expand_dims(eeg_trials_noisy, axis=1)\n\n \n if len(eeg_trials_noisy.shape) == 3:\n eeg_trials_noisy = np.expand_dims(eeg_trials_noisy, axis=-1)\n \n if eeg_trials_noisy.shape[2] != n_elec:\n eeg_trials_noisy = np.swapaxes(eeg_trials_noisy, 1, 2)\n\n if not return_raw_data:\n if return_single_epoch:\n print(f'\\nConvert EEG matrices to a single instance of mne.Epochs...')\n print(f'eeg_trials_noisy.shape={eeg_trials_noisy.shape}')\n ERP_samples_noisy = np.mean(eeg_trials_noisy, axis=1)\n epochs = eeg_to_Epochs(ERP_samples_noisy, pth_fwd, info=info)\n\n else:\n print(f'\\nConvert EEG matrices to instances of mne.Epochs...')\n epochs = Parallel(n_jobs=n_jobs, backend='loky')(\n delayed(eeg_to_Epochs)(sample, pth_fwd, info=info) \n for sample in tqdm(eeg_trials_noisy))\n else:\n epochs = eeg_trials_noisy\n\n return epochs\n\n# def create_eeg(sources, pth_fwd, snr=1, n_trials=20, beta=0):\n# ''' Create EEG of specified number of trials based on sources and some SNR.'''\n# with open(pth_fwd + '/leadfield.pkl', 'rb') as file:\n# leadfield = pkl.load(file)[0]\n# n_samples = len(sources)\n# n_elec = leadfield.shape[0]\n# n_timepoints = sources[0][0].shape[1]\n\n# eeg_clean = np.stack([np.matmul(leadfield, y[0]) for y in sources], axis=0)\n\n\n\n# eeg_trials_noisy = np.zeros((n_samples, n_trials, n_elec, n_timepoints))\n \n# for sample in tqdm(range(n_samples)):\n# noise_trial = np.stack([add_noise(eeg_clean[sample], snr, beta) for trial in range(n_trials)], axis=0)\n# if len(noise_trial.shape) == 2:\n# noise_trial = np.expand_dims(noise_trial, axis=-1)\n\n# eeg_trials_noisy[sample, :, :, :] = noise_trial\n\n# return eeg_trials_noisy\n"
] |
[
[
"numpy.expand_dims",
"numpy.squeeze",
"numpy.max",
"numpy.mean",
"numpy.where",
"numpy.square",
"numpy.swapaxes",
"numpy.unique",
"numpy.arange",
"numpy.matmul",
"numpy.stack",
"numpy.sin",
"numpy.ceil",
"numpy.zeros",
"numpy.power",
"numpy.floor",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] |
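The amplitude taper in the row above is plain numpy and easy to sanity-check on its own. A sketch of the gaussian profile with made-up distances (the 2.5 mm spread is an arbitrary choice, not a value from the source):

    import numpy as np

    def gaussian(x, mu, sig):
        # Radial profile used above to let source amplitude decay with distance.
        return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))

    dists = np.linspace(0, 10, 6)   # hypothetical dipole distances (mm)
    print(gaussian(dists, 0, 2.5))  # 1.0 at the centre, falling off outward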
luiscarlosgph/semi-synthetic
|
[
"25780c1ed3b02cde96830befef74a898b1d16eab"
] |
[
"src/common.py"
] |
[
"\"\"\"\n@brief Common functions used for prototyping methods and handling data.\n@author Luis Carlos Garcia-Peraza Herrera ([email protected]).\n@date 17 March 2017.\n\"\"\"\n\nimport sys\nimport math\nimport os\nimport re\nimport random # choice\nimport string # ascii_lowercase\nimport ntpath # basename\nimport numpy as np # np.array\nimport tempfile # tempfile.NamedTemporaryFile\nimport decimal\nimport imghdr\nimport shutil\nimport cv2\nimport json\nimport collections\nimport zipfile\nimport datetime\n\n# -- Constants -- ##\nALLOWED_IMAGE_FORMATS = [\n 'gif',\n 'pbm',\n 'pgm',\n 'ppm',\n 'tiff',\n 'xbm',\n 'jpeg',\n 'bmp',\n 'png']\n\n# -- Regular expressions -- ##\nINT_RE = r'(?:[-+]?\\d+)'\nFLOAT_RE = r'(?:[-+]?\\d*\\.\\d+(?:[eE][-+]\\d+)?|\\d+)'\nBLANKS_RE = r'(?:(?:[ \\t\\r\\n])+)'\n\n#\n# @class for terminal colours. Use it like this:\n#\n# print bcolors.WARNING + \"Warning: No active frommets remain. Continue?\" + bcolors.ENDC\n#\n# @details Credit to @joeld:\n# http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python\n#\nclass bcolours:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nclass Colour:\n \"\"\"@class Colour stores colours in BGR uint8 format.\"\"\"\n RED = [0, 0, 255]\n GREEN = [0, 255, 0]\n BLUE = [255, 0, 0]\n YELLOW = [0, 255, 255]\n MAGENTA = [255, 0, 255]\n BLACK = [0, 0, 0]\n WHITE = [255, 255, 255]\n\n\ndef gen_rand_str(length=8):\n \"\"\"\n @brief Generates a random lowercase string.\n @param[in] length Desired length of the returned string.\n @returns a random string of the given length.\n \"\"\"\n return ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\n\n#\n# @brief Converts degrees to radians.\n#\n# @returns the amount of radians equivalent to the input 'degrees'.\ndef deg_to_rad(degrees):\n return math.pi * degrees / 180.0\n\n\n#\n# @brief Converts radians to degrees.\n#\ndef rad_to_deg(r):\n return r * 180.0 / math.pi\n\n\n#\n# @brief Lists a directory.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n# list.\n# @returns a list of files and folders inside the given path.\ndef listdir(path):\n return filter(None, sorted(os.listdir(path)))\n\n\n#\n# @brief Lists a directory removing the extension of the files and the hidden files.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n# list.\n#\n# @returns a list of -unhidden- files and folders inside the given path.\ndef listdir_no_hidden(path):\n return natsort([f for f in listdir(path) if not f.startswith('.')])\n\n\n#\n# @brief Lists a directory removing the extension of the files. 
Folder names will remain untouched.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n#            list.\n#\n# @returns a list of files and folders inside the given path.\ndef listdir_no_ext(path):\n    return [f.split('.')[0] for f in listdir(path)]\n\n\n#\n# @brief Lists a directory removing the extension of the files and the hidden files.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n#            list.\n#\n# @returns a list of -unhidden- files (without extension) and folders inside the given path.\ndef listdir_no_ext_no_hidden(path):\n    return [f for f in listdir_no_ext(path) if not f.startswith('.')]\n\n\n#\n# @brief Lists a directory.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n#            list.\n#\n# @returns a list of absolute paths pointing to the files and folders inside the given path.\ndef listdir_absolute(path):\n    absolute_path = os.path.abspath(path)\n    return [absolute_path + '/' + fpath for fpath in listdir(path)]\n\n\n#\n# @brief Lists a directory without listing hidden files.\n#\n# @param[in] path String containing the path (relative or absolute) to the folder that you want to\n#            list.\n#\n# @returns a list of absolute paths pointing to the files and folders inside the given path.\ndef listdir_absolute_no_hidden(path):\n    absolute_path = os.path.abspath(path)\n    return [absolute_path + '/' + fpath for fpath in listdir_no_hidden(path)]\n\n\n#\n# @brief Writes a message to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef write_info(msg):\n    sys.stdout.write(\n        '[' +\n        bcolours.OKGREEN +\n        'INFO' +\n        bcolours.ENDC +\n        '] ' +\n        msg)\n    sys.stdout.flush()\n\n\n#\n# @brief Writes a message and a '\\n' to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef writeln_info(msg):\n    sys.stdout.write(\n        '[' + bcolours.OKGREEN + 'INFO' + bcolours.ENDC + '] ' + str(msg) + '\\n')\n    sys.stdout.flush()\n\n\n#\n# @brief Writes a warning message to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef write_warn(msg):\n    sys.stdout.write(\n        '[' +\n        bcolours.WARNING +\n        'WARN' +\n        bcolours.ENDC +\n        '] ' +\n        msg)\n    sys.stdout.flush()\n\n\n#\n# @brief Writes a warning message and a '\\n' to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef writeln_warn(msg):\n    sys.stdout.write(\n        '[' +\n        bcolours.WARNING +\n        'WARN' +\n        bcolours.ENDC +\n        '] ' +\n        msg +\n        '\\n')\n    sys.stdout.flush()\n\n\n#\n# @brief Writes an error message to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef write_error(msg):\n    sys.stdout.write(\n        '[' +\n        bcolours.FAIL +\n        'ERROR' +\n        bcolours.ENDC +\n        '] ' +\n        msg)\n    sys.stdout.flush()\n\n\n#\n# @brief Writes an error message and a '\\n' to screen and flushes the buffers.\n#\n# @param[in] msg is the message that will be printed to the user.\n#\n# @returns nothing.\ndef writeln_error(msg):\n    sys.stdout.write(\n        '[' +\n        bcolours.FAIL +\n        'ERROR' +\n        bcolours.ENDC +\n        '] ' +\n        msg +\n        '\\n')\n    sys.stdout.flush()\n\n\n#\n# @brief Writes an 'OK\\n' to screen and flushes buffer.\n#\n# @returns nothing.\ndef writeln_ok():\n    
sys.stdout.write('[' + bcolours.OKGREEN + 'OK' + bcolours.ENDC + \"]\\n\")\n    sys.stdout.flush()\n\n\n#\n# @brief Writes an '[FAIL]\\n' to screen and flushes buffer.\n#\n# @returns nothing.\ndef writeln_fail():\n    sys.stdout.write('[' + bcolours.FAIL + 'FAIL' + bcolours.ENDC + \"]\\n\")\n    sys.stdout.flush()\n\n\n#\n# @brief Extract a filename (without file extension) from a (relative or absolute) path.\n#\n# @param[in] path Path pointing to a file (can be relative or absolute).\n#\n# @returns the filename without extension.\ndef get_fname_no_ext(path):\n    fname, ext = os.path.splitext(ntpath.basename(path))\n    return fname\n\n\n#\n# @brief Obtain extension from file path. Warning: The dot '.' is also returned!\n#\n# @param[in] path Path pointing to a file (can be relative or absolute).\n#\n# @returns a string with the extension of the given path.\ndef get_ext(path):\n    fname, ext = os.path.splitext(ntpath.basename(path))\n    return ext\n\n\n#\n# @brief Function that reads a file.\n#\n# @param[in] path to the file to be read.\n#\n# @returns the contents of the file.\ndef read_file(path):\n    with open(path, 'r') as myfile:\n        contents = myfile.read()\n    return contents\n\n\n#\n# @brief Function to read each line of a file into a list.\n#\n# @param[in] path to the file to be read.\n#\n# @returns an array of lines.\ndef read_file_by_lines(path):\n    with open(path) as f:\n        content = f.readlines()\n    content = [x.strip() for x in content]\n    return content\n\n\n#\n# @param[in] path Path to the file whose existence you want to check.\n#\n# @returns true if the file exists, otherwise returns false.\ndef file_exists(fpath):\n    return os.path.isfile(fpath)\n\n\n#\n# @param[in] path Path to the folder whose existence you want to check.\n#\n# @returns true if the folder exists, otherwise returns false.\ndef dir_exists(dpath):\n    return os.path.isdir(dpath)\n\n\n#\n# @param[in] path Path to a possible file or folder.\n#\n# @returns True if there is a file or folder that already exists in the given path.\ndef path_exists(path):\n    return file_exists(path) or dir_exists(path)\n\n\n#\n# @brief Get the path to the directory that contains a certain file.\n#\n# @param[in] path Path to the file.\n#\n# @returns absolute path to the directory where the file is located.\ndef get_file_dir(path):\n    assert(file_exists(path))\n    return os.path.dirname(os.path.realpath(path))\n\n\n#\n# @brief Appends a string to a text file.\n#\n# @param[in] path Path to the text file. 
It should exist already.\n# @param[in] text Text to be appended to the file.\n#\n# @returns nothing.\ndef append(path, text):\n    # Sanity check: the file must exist.\n    if not file_exists(path):\n        raise RuntimeError('[append] Error, the file must already exist.')\n\n    # Append text to existing file\n    with open(path, \"a\") as myfile:\n        myfile.write(text)\n\n\n#\n# @brief Copies a file to a destination raising an exception if the destination file already exists.\n#\n# @details This function will raise exceptions if the source file does not exist or if the\n#          destination file already exists.\n#\n# @param[in] source Source path.\n# @param[in] destination Destination path.\n#\n# @returns nothing.\ndef copy_file(source, destination):\n    # Sanity check: source exists and it is a file\n    if not file_exists(source):\n        raise RuntimeError(\n            '[copy_file] Error, source file [ ' +\n            source +\n            ' ] does not exist.')\n\n    # Sanity check: destination file does not exist\n    if file_exists(destination):\n        raise RuntimeError('[copy_file] Error, destination file [ ' + destination + ' ] already '\n                           + 'exists.')\n\n    shutil.copy(source, destination)\n\n\n#\n# @brief Moves a file to a destination raising an exception if the destination file already exists.\n#\n# @details This function will raise exceptions if the source file does not exist or if the\n#          destination file already exists.\n#\n# @param[in] source Source path.\n# @param[in] destination Destination path.\n#\n# @returns nothing.\ndef move_file(source, destination):\n    # Sanity check: source exists and it is a file\n    if not file_exists(source):\n        raise RuntimeError(\n            '[move_file] Error, source file [ ' +\n            source +\n            ' ] does not exist.')\n\n    # Sanity check: destination file does not exist\n    if file_exists(destination):\n        raise RuntimeError('[move_file] Error, destination file [ ' + destination + ' ] already '\n                           + 'exists.')\n\n    shutil.move(source, destination)\n\n\n#\n# @brief Copy directory and all its contents.\n#\n# @param[in] src Source path to the directory.\n# @param[in] dst Destination path to the directory.\n#\n# @returns nothing.\ndef copy_dir(src, dst):\n    assert(path_exists(src))\n    assert(not path_exists(dst))\n    try:\n        shutil.copytree(src, dst)\n    except shutil.Error as e:\n        print('Directory not copied. Error: %s' % e)\n    except OSError as e:\n        print('Directory not copied. Error: %s' % e)\n\n\n#\n# @brief Move directory with all its contents to a new path.\n#\n# @param[in] src Source directory path.\n# @param[in] dst Destination directory path.\n#\n# @returns nothing.\ndef move_dir(src, dst):\n    assert(path_exists(src))\n    assert(not path_exists(dst))\n    try:\n        shutil.move(src, dst)\n    except shutil.Error as e:\n        print('Directory not moved. Error: %s' % e)\n    except OSError as e:\n        print('Directory not moved. 
Error: %s' % e)\n\n\n#\n# @brief Create a temporary directory.\n#\n# @returns the path to the temporary directory created.\ndef mk_temp_dir():\n dir_path = tempfile.gettempdir() + '/' + gen_rand_str()\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n else:\n raise RuntimeError('[mk_temp_dir] Error, the randomly generated temporary directory '\n + 'already exists.')\n return dir_path\n\n\n#\n# @brief Create folder.\n#\n# @param[in] path to the new folder.\n#\n# @returns nothing.\ndef mkdir(path):\n if os.path.exists(path):\n raise RuntimeError(\n '[mkdir] Error, this path already exists so a folder cannot be created.')\n os.makedirs(path)\n\n\n#\n# @brief Remove directory.\n#\n# @param[in] path to the folder to be deleted.\n#\n# @returns nothing.\ndef rmdir(path):\n shutil.rmtree(path)\n\n\n#\n# @brief Remove file. Only files can be removed with this function.\n#\n# @param[in] path to the file that will be removed.\n#\n# @returns nothing.\ndef rm(path):\n # Assert that the path is a file\n if not file_exists(path):\n raise RuntimeError('[rm] The given path is not a file.')\n\n os.unlink(path)\n\n\n#\n# @brief Runs a shell command and returns the output.\n#\n# @param[in] cmd String with the command that will be run.\n#\n# @returns the output (stdout and stderr) of the command.\ndef shell(cmd):\n # Generate the name of the random log file that will store the command\n # output\n tmp_log_file = tempfile.gettempdir() + '/' + gen_rand_str() + '.log'\n cmd += ' > ' + tmp_log_file + ' 2>&1'\n\n # Run command\n os.system(cmd)\n\n # Read command output from logfile\n fd = open(tmp_log_file, 'r')\n output = fd.read()\n fd.close()\n\n # Delete log file\n rm(tmp_log_file)\n\n return output\n\n\n#\n# @brief Restarts the line, VT100 terminals only.\n#\ndef reset_line():\n sys.stdout.write('\\x1b[2K\\r')\n sys.stdout.flush()\n\n\n#\n# @brief Moves to the next terminal line.\n#\ndef new_line():\n sys.stdout.write('\\r\\n')\n sys.stdout.flush()\n\n\n#\n# @brief Saves a dictionary to file.\n#\n# @param[in] d Dictionary to be written to file.\n# @param[in] path String with the path to the output file.\n# @param[in] indent Indent that will be used for the JSON file.\n#\n# @returns nothing.\ndef save_dict_as_json(d, path, indent=1, overwrite=False):\n path = os.path.abspath(path)\n if not overwrite:\n assert(not file_exists(path))\n\n # Convert dict into a JSON string\n contents = json.dumps(d, sort_keys=True, indent=indent)\n\n # Write output file\n with open(path, \"w\") as text_file:\n text_file.write(contents)\n\n\n#\n# @brief Loads a JSON of a single section and parameters as a dictionary.\n#\n# @param[in] path Path to the existing JSON file.\n#\n# @returns a dictionary with the configuration in the file.\ndef load_json_as_dict(path):\n path = os.path.abspath(path)\n assert(file_exists(path))\n\n # Read file\n with open(path, \"r\") as text_file:\n contents = text_file.read()\n\n # Convert file into a dict\n d = json.loads(contents)\n assert(isinstance(d, type({})))\n\n return d\n\n\n#\n# @brief Converts all the keys and values of a dictionary to int.\n#\n# @param[in] Input dictionary.\n#\n# @returns a dictionary that contains the same values but of type() int.\ndef convert_dict_to_int(data):\n return {int(k): int(v) for k, v in data.items()}\n\n\n#\n# @brief Splits a string that is separated by a particular char.\n#\n# @details Usage:\n# >>> split_at('this_is_my_name_and_its_cool', '_', 2)\n# >>> ('this_is', 'my_name_and_its_cool')\n#\n# Code from:\n# 
https://stackoverflow.com/questions/27227399/python-split-a-string-at-an-underscore\n#\n# @param[in] s String that you want to split.\n# @param[in] c Character that separates the string.\n# @param[in] n Number of the split that you want to take.\n#\ndef split_at(s, c, n):\n    words = s.split(c)\n    return c.join(words[:n]), c.join(words[n:])\n\n\n#\n# @brief Same effect as the Unix command 'touch'.\n#\n# @param[in] fname Path to the file.\n# @param[in] times (atime, mtime) for the file.\n#\n# @returns nothing.\ndef touch(fname, times=None):\n    with open(fname, 'a'):\n        os.utime(fname, times)\n\n\n#\n# @brief Draw a spot on an image.\n#\n# @param[in] x Column of the image.\n# @param[in] y Row of the image.\n# @param[in] radius Radius of the spot.\n# @param[in] colour BGR colour as a list of uint8.\n#\n# @returns the original image with the spot overlaid on top.\ndef draw_spot(img, x, y, radius=1, colour=Colour.MAGENTA):\n    retval = img.copy()\n    cv2.circle(retval, (x, y), radius, colour, thickness=-1)\n    return retval\n\n\n#\n# @brief Draw a line on an image.\n#\n# @param[in] x0 X coordinate of the initial point of the line.\n# @param[in] y0 Y coordinate of the initial point of the line.\n# @param[in] x1 X coordinate of the final point of the line.\n# @param[in] y1 Y coordinate of the final point of the line.\n# @param[in] thickness Thickness of the line.\n# @param[in] colour BGR colour as a list of uint8.\n#\n# @returns the original image with the line overlaid on top.\ndef draw_line(img, x0, y0, x1, y1, thickness=1, colour=Colour.MAGENTA):\n    retval = img.copy()\n    cv2.line(retval, (x0, y0), (x1, y1), colour, thickness)\n    return retval\n\n\n#\n# @brief Zips a folder and all its contents to a file.\n#\n# @param[in] input_dir_path Path to the directory that we will zip.\n# @param[in] output_path Path to the output zip file.\n#\n# @returns nothing.\ndef zipdir(input_dir_path, output_path):\n    assert(dir_exists(input_dir_path))\n    assert(not file_exists(output_path))\n\n    shutil.make_archive(output_path, 'zip', input_dir_path)\n\n\n#\n# @brief Unzip file into a specific path.\n#\n# @param[in] input_zip_path Path to the zip file to be extracted.\n# @param[in] output_dir Path to the folder where the contents of the zip will be extracted.\n#\n# @returns nothing.\ndef unzipdir(input_zip_path, output_dir):\n    assert(file_exists(input_zip_path))\n\n    zip_ref = zipfile.ZipFile(input_zip_path)\n    zip_ref.extractall(output_dir)\n    zip_ref.close()\n\n\n#\n# @brief Generates a random integer, either 0 or 1.\n#\ndef randbin():\n    return np.random.choice([0, 1])\n\n\n#\n# @brief Save string to file.\n#\ndef save_str_to_file(contents, path):\n    with open(path, 'w') as text_file:\n        text_file.write(\"{0}\".format(contents))\n\n\n#\n# @brief Merge two Python dictionaries.\n#\ndef merge_two_dicts(x, y):\n    z = x.copy()\n    z.update(y)\n    return z\n\n\n#\n# @brief Natural sort of a list.\n#\n# @param[in] l List to sort.\n#\n# @returns a new list sorted taking into account numbers and not just their ASCII codes.\ndef natsort(l):\n    convert = lambda text: int(text) if text.isdigit() else text.lower()\n    alphanum_key = lambda key: [convert(c)\n                                for c in re.split('([0-9]+)', str(key))]\n    return sorted(l, key=alphanum_key)\n\n\n#\n# @brief Get the current date and time.\n#\n# @param[in] dt_format Format of the date and time.\n#\ndef datetime_str(dt_format='%H_%M_%S__%d_%b_%Y'):\n    return datetime.datetime.now().strftime(dt_format)\n\n#\n# @brief Convert dictionary into HTML.\n#\ndef convert_dict_to_html(dict_obj, indent=0):\n    contents = ' ' * indent + 
\"<ul>\\n\"\n for k, v in dict_obj.items():\n if isinstance(v, dict):\n contents += (' ' * indent) + '<li>' + k + ': </li>'\n contents += convert_dict_to_html(v, indent + 1)\n else:\n contents += (\n ' ' * indent) + '<li>' + str(\n k) + ': ' + str(\n v) + '</li>'\n contents += ' ' * indent + '</ul>\\n'\n return contents\n\n\n#\n# @brief check wether all elements are greater than a number.\n#\n# @param[in] tensor numpy ndarray.\n# @param[in] thresh threshold.\n# @param[in] eps epsilon for comparison.\n#\n# @returns true if all the values of the tensor are greater than thresh.\n#\ndef tensor_gt(tensor, thresh):\n flat_tensor = tensor.flatten()\n return np.where(flat_tensor > thresh)[0].shape[0] == flat_tensor.shape[0]\n\n\n#\n# @brief check wether all elements are greater or equal than a number.\n#\n# @param[in] tensor numpy ndarray.\n# @param[in] thresh threshold.\n# @param[in] eps epsilon for comparison.\n#\n# @returns true if all the values of the tensor are greater or equal than thresh.\n#\ndef tensor_gt_eq(tensor, thresh):\n flat_tensor = tensor.flatten()\n return np.where(flat_tensor >= thresh)[0].shape[0] == flat_tensor.shape[0]\n\n\n#\n# @brief Check wether all elements are lower than a number.\n#\n# @param[in] tensor Numpy ndarray.\n# @param[in] thresh Threshold.\n# @param[in] eps Epsilon for comparison.\n#\n# @returns true if all the values of the tensor are lower than thresh.\n#\ndef tensor_lt(tensor, thresh):\n flat_tensor = tensor.flatten()\n return np.where(flat_tensor < thresh)[0].shape[0] == flat_tensor.shape[0]\n\n\n#\n# @brief Check wether all elements are lower or equal than a number.\n#\n# @param[in] tensor Numpy ndarray.\n# @param[in] thresh Threshold.\n# @param[in] eps Epsilon for comparison.\n#\n# @returns true if all the values of the tensor are lower or equal than thresh.\n#\ndef tensor_lt_eq(tensor, thresh):\n flat_tensor = tensor.flatten()\n return np.where(flat_tensor <= thresh)[0].shape[0] == flat_tensor.shape[0]\n\n\n#\n# @returns true if a tensor is all zeros. Otherwise false is returned.\n#\ndef tensor_all_zeros(tensor):\n return not tensor.any()\n\n\n#\n# @brief Convert image from PIL to OpenCV.\n#\ndef pil_to_cv2(im):\n return cv2.cvtColor(np.array(im), cv2.COLOR_RGBA2BGR)\n\n\n#\n# @brief Encode a dictionary into JSON. It must contain either ints, floats, strings, or\n# numpy arrays. 
Other datatypes will raise a ValueError.\n#\ndef dic_to_json(dic):\n\n    # Build a serialisable dic, converting numpy arrays into lists\n    serialisable_dic = {}\n    for k in dic:\n        v = dic[k]\n        if isinstance(v, int) or isinstance(v, float) or isinstance(v, str):\n            serialisable_dic[k] = v\n        elif isinstance(v, np.ndarray):\n            serialisable_dic[k] = v.tolist()\n        else:\n            raise ValueError(\n                'The data type ' + str(type(v)) + ' is not serialisable.')\n\n    return json.dumps(serialisable_dic)\n\n\n#\n# @brief Decode a JSON into a dictionary.\n#\ndef json_to_dic(jsonstr):\n\n    raw_dic = json.loads(jsonstr)\n    dic = {}\n    for k in raw_dic:\n        v = raw_dic[k]\n        if isinstance(v, list):\n            dic[k] = np.array(v)\n        else:\n            dic[k] = v\n\n    return dic\n\n\n#\n# @brief Computes the index of the median for a given array.\n#\ndef argmedian(data):\n    return np.argsort(data)[len(data) // 2]\n\n\n#\n# @brief Argmax that breaks ties uniformly at random.\n#\ndef randargmax(b, **kw):\n    return np.argmax(np.random.random(b.shape) * (b == b.max()), **kw)\n\n\n#\n# @brief Argmin that breaks ties uniformly at random.\n#\ndef randargmin(b, **kw):\n    return np.argmin(np.random.random(b.shape) * (b == b.min()), **kw)\n\n\n#\n# @brief Convert a video file to a folder of frames.\n#\ndef convert_video_to_images(\n        video_path,\n        folder_path,\n        fps,\n        prefix='',\n        fmt='%05d'):\n    if dir_exists(folder_path):\n        raise RuntimeError(\n            'When converting a video to images the output folder should not exist.')\n    mkdir(folder_path)\n    cmd = 'ffmpeg -i ' + video_path + ' -r ' + str(fps) + ' -f image2 ' + folder_path + '/' \\\n        + prefix + fmt + '.png'\n    return shell(cmd)\n\n\n#\n# @brief Find the nearest element in an array.\n#\ndef find_nearest(array, value):\n    array = np.asarray(array)\n    idx = (np.abs(array - value)).argmin()\n    return array[idx]\n\n\n#\n# @brief Find the index of the nearest element in an array.\n#\ndef find_nearest_index(array, value):\n    array = np.asarray(array)\n    return (np.abs(array - value)).argmin()\n\n\n# This module cannot be executed as a script because it is not a script :)\nif __name__ == '__main__':\n    print(\n        'Error, this module is not supposed to be executed by itself.',\n        file=sys.stderr)\n    sys.exit(1)\n"
] |
[
[
"numpy.random.random",
"numpy.abs",
"numpy.random.choice",
"numpy.asarray",
"numpy.argsort",
"numpy.array",
"numpy.where"
]
] |
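The tensor_gt / tensor_lt family in common.py answers "do all elements pass the comparison?" by counting np.where hits against the flattened length. A behaviour-equivalent sketch (not the repository's code) that avoids materialising the index arrays by using np.all directly:

import numpy as np

def tensor_gt(tensor, thresh):
    # True iff every element of the tensor is strictly greater than thresh.
    return bool(np.all(tensor > thresh))

t = np.array([[1.0, 2.0], [3.0, 4.0]])
assert tensor_gt(t, 0.5)
assert not tensor_gt(t, 3.5)

The same one-liner pattern covers the >=, < and <= variants by swapping the comparison operator.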
GraffJosh/locomotion_simulation
|
[
"37f42d26555867e05f55178f50bdb9252e51fd51"
] |
[
"locomotion/agents/whole_body_controller/com_velocity_estimator.py"
] |
[
"\"\"\"State estimator.\"\"\"\n\nimport numpy as np\nfrom typing import Any, Sequence\n\nfrom locomotion.utilities.moving_window_filter import MovingWindowFilter\n\n_DEFAULT_WINDOW_SIZE = 20\n\nclass COMVelocityEstimator(object):\n \"\"\"Estimate the CoM velocity using on board sensors.\n\n\n Requires knowledge about the base velocity in world frame, which for example\n can be obtained from a MoCap system. This estimator will filter out the high\n frequency noises in the velocity so the results can be used with controllers\n reliably.\n\n \"\"\"\n\n def __init__(\n self,\n robot: Any,\n window_size: int = _DEFAULT_WINDOW_SIZE,\n ):\n self._robot = robot\n self._window_size = window_size\n self.reset(0)\n\n @property\n def com_velocity_body_frame(self) -> Sequence[float]:\n \"\"\"The base velocity projected in the body aligned inertial frame.\n\n The body aligned frame is a intertia frame that coincides with the body\n frame, but has a zero relative velocity/angular velocity to the world frame.\n\n Returns:\n The com velocity in body aligned frame.\n \"\"\"\n return self._com_velocity_body_frame\n\n @property\n def com_velocity_world_frame(self) -> Sequence[float]:\n return self._com_velocity_world_frame\n\n def reset(self, current_time):\n del current_time\n # We use a moving window filter to reduce the noise in velocity estimation.\n self._velocity_filter_x = MovingWindowFilter(\n window_size=self._window_size)\n self._velocity_filter_y = MovingWindowFilter(\n window_size=self._window_size)\n self._velocity_filter_z = MovingWindowFilter(\n window_size=self._window_size)\n self._com_velocity_world_frame = np.array((0, 0, 0))\n self._com_velocity_body_frame = np.array((0, 0, 0))\n\n def update(self, current_time):\n del current_time\n velocity = self._robot.GetBaseVelocity()\n\n vx = self._velocity_filter_x.calculate_average(velocity[0])\n vy = self._velocity_filter_y.calculate_average(velocity[1])\n vz = self._velocity_filter_z.calculate_average(velocity[2])\n self._com_velocity_world_frame = np.array((vx, vy, vz))\n\n base_orientation = self._robot.GetTrueBaseOrientation()\n _, inverse_rotation = self._robot.pybullet_client.invertTransform(\n (0, 0, 0), base_orientation)\n\n self._com_velocity_body_frame, _ = (\n self._robot.pybullet_client.multiplyTransforms(\n (0, 0, 0), inverse_rotation, self._com_velocity_world_frame,\n (0, 0, 0, 1)))\n"
] |
[
[
"numpy.array"
]
] |
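com_velocity_estimator.py smooths each world-frame velocity component with a MovingWindowFilter before rotating the result into the body-aligned frame. A self-contained sketch of such a filter, assuming a plain uniform moving average over the last window_size samples (the actual locomotion.utilities.moving_window_filter implementation may differ, e.g. by keeping a running sum):

from collections import deque

import numpy as np

class MovingWindowFilter:
    def __init__(self, window_size=20):
        # Fixed-length buffer: appending beyond maxlen drops the oldest sample.
        self._buffer = deque(maxlen=window_size)

    def calculate_average(self, new_value):
        # Push the newest sample and return the mean over the current window.
        self._buffer.append(new_value)
        return float(np.mean(self._buffer))

filt = MovingWindowFilter(window_size=3)
for v in (1.0, 2.0, 6.0):
    smoothed = filt.calculate_average(v)
assert smoothed == 3.0  # mean of the last three samples

Filtering vx, vy and vz with three independent scalar filters, as the estimator does, is equivalent to filtering the velocity vector componentwise.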
uakarsh/tensorflow
|
[
"af583ca24e7592e09ba4fbc65e25e32bea3311cc"
] |
[
"tensorflow/python/distribute/cross_device_ops_test.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for CrossDeviceOps.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport threading\nimport time\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib\nfrom tensorflow.python.distribute import collective_util\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import test_util\nfrom tensorflow.python.distribute import values as value_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import nest\n\nCollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher\nCommunicationImplementation = collective_util.CommunicationImplementation\nReduceOp = reduce_util.ReduceOp\nIndexedSlicesValue = indexed_slices.IndexedSlicesValue\nIndexedSlices = indexed_slices.IndexedSlices\n\n\ndef make_per_replica_value(value, devices):\n \"\"\"Creates a `PerReplica` object whose values reside in `devices`.\n\n Args:\n value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable\n that takes one argument (`device_idx`) and should return the value that is\n going to be created on devices[device_idx].\n devices: a list of device strings to create `PerReplica` values on.\n\n Returns:\n A `PerReplica` object.\n \"\"\"\n values = []\n for device_idx, device in enumerate(devices):\n if callable(value):\n v = value(device_idx)\n elif isinstance(value, list):\n v = value[device_idx]\n else:\n v = value\n if isinstance(v, IndexedSlicesValue):\n with ops.device(device):\n values.append(\n IndexedSlices(\n values=array_ops.identity(v.values),\n indices=array_ops.identity(v.indices),\n dense_shape=array_ops.identity(v.dense_shape)))\n else:\n with 
ops.device(device):\n        values.append(array_ops.identity(v))\n  return value_lib.PerReplica(values)\n\n\ndef enable_collective_ops():\n  \"\"\"Enable collectives in the current process.\"\"\"\n  cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()\n  context.context().configure_collective_ops(\n      collective_leader=\"'/job:worker/replica:0/task:0'\")\n  config_proto = config_pb2.ConfigProto()\n  config_proto.experimental.collective_group_leader = (\n      \"/job:worker/replica:0/task:0\")\n  server_def = tensorflow_server_pb2.ServerDef(\n      cluster=cluster_resolver.cluster_spec().as_cluster_def(),\n      default_session_config=config_proto,\n      job_name=cluster_resolver.task_type,\n      task_index=cluster_resolver.task_id,\n      protocol=cluster_resolver.rpc_layer)\n  context.context().enable_collective_ops(server_def)\n  # Recover default flag values.\n  CollectiveReplicaLauncher._prefer_unique_instance_key = True\n  CollectiveReplicaLauncher._prefer_ordering_token = False\n\n\nclass MultiProcessPoolRunner():\n\n  def __init__(self, num_processes):\n    cluster_spec_dict = multi_worker_test_base.create_cluster_spec(\n        num_workers=num_processes)\n    self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)\n\n\n# Global MultiProcessPoolRunners that can be shared by test cases to avoid\n# expensive initialization cost of TensorFlow in new processes.\n#\n# Note that they have to be globals and can't be owned by test classes because\n# fn usually captures the test class instance, and the test class\n# instance can't be pickled if it has mpr as a member (it is not allowed to\n# pickle Process objects).\n# TODO(crccw): Use `num_workers` combination once it is ready.\nglobal_mpr_2p = MultiProcessPoolRunner(num_processes=2)\nglobal_mpr_1p = MultiProcessPoolRunner(num_processes=1)\n\n\ndef get_global_mpr(num_processes):\n  if num_processes == 1:\n    return global_mpr_1p.runner\n  elif num_processes == 2:\n    return global_mpr_2p.runner\n  else:\n    raise ValueError(\"get_global_mpr: num_processes must be 1 or 2, got %d\" %\n                     num_processes)\n\n\nclass CollectiveOpsTest(test.TestCase, parameterized.TestCase):\n\n  def setUp(self):\n    super().setUp()\n    # Enabling collectives can be done in \"setUpClass\", but requires using\n    # different collective_keys in different tests as collectives are reused\n    # across tests. 
Always resetting collective ops before each test offers\n # better test isolation.\n global_mpr_1p.runner.run(enable_collective_ops)\n global_mpr_2p.runner.run(enable_collective_ops)\n\n def make_collective(self, num_processes, gpu_per_process):\n \"\"\"Returns collectives and other info to be used in tests.\n\n Args:\n num_processes: an integer indicating the number of processes that\n participate in the collective.\n gpu_per_process: number of GPUs (0 if no GPUs) used by each process.\n\n Returns:\n A tuple of (collective, devices, pid) where collective is a instance\n of `CollectiveAllReduce`, devices are a list of local devices (str)\n attached to the current process, and pid is the id of this process among\n all participant processes.\n \"\"\"\n\n cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()\n devices = [\n \"/job:worker/replica:0/task:%d/device:CPU:0\" % cluster_resolver.task_id\n ]\n if gpu_per_process > 0:\n devices = [\n \"/job:worker/replica:0/task:%d/device:GPU:%d\" %\n (cluster_resolver.task_id, i) for i in range(gpu_per_process)\n ]\n group_size = num_processes * len(devices)\n collective = cross_device_ops_lib.CollectiveAllReduce(\n devices=devices, group_size=group_size)\n return collective, devices, cluster_resolver.task_id\n\n def as_list(self, value):\n \"\"\"An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.\n\n The reason it exists is to provide a uniformed view of returned value of\n \"reduce\" calls, especially across tf.function boundaries. Returning\n `Mirrored` from a tf.function will only evaluate the primary value, which\n makes collective ops of non-primary device being pruned, and will eventually\n cause hanging.\n\n Args:\n value: the value to convert, can be one of `Mirrored`, `Tensor` and\n `IndexedSlices`.\n\n Returns:\n A list of `Tensor` or `IndexedSlices`.\n \"\"\"\n if isinstance(value, ops.Tensor):\n return [value]\n elif isinstance(value, IndexedSlices):\n return [value]\n elif isinstance(value, value_lib.Mirrored):\n return value.values\n else:\n raise ValueError(\"unwrap: unsupported input type: %s\" % type(value))\n\n RunOptions = collections.namedtuple( # pylint: disable=invalid-name\n \"RunOptions\",\n [\n \"mode\", # A list of str from [\"eager\", \"func_graph\"]\n \"num_processes\",\n \"gpus_per_process\",\n \"reduce_op\",\n \"communication_options\",\n \"prefer_unique_instance_key\",\n ])\n RunOptions.__new__.__defaults__ = ([\"eager\",\n \"func_graph\"], 2, 0, ReduceOp.SUM,\n collective_util.Options(), True)\n\n def reduce_and_verify(self, inputs, expect, options):\n \"\"\"Reduce the given `inputs` and verify the output matches `expect`.\n\n Args:\n inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be\n fed to i-th replica.\n expect: a `Tensor` or `IndexedSlices`. 
This should be the expected value\n for one replica.\n options: a `RunOpotions` instance.\n \"\"\"\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n options.prefer_unique_instance_key)\n collective, devices, pid = self.make_collective(options.num_processes,\n options.gpus_per_process)\n\n def reduce_fn():\n value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]\n per_replica_value = make_per_replica_value(value_fn, devices)\n reduced_values = collective.reduce(options.reduce_op, per_replica_value,\n per_replica_value,\n options.communication_options)\n reduced_values = self.as_list(reduced_values)\n self.assertAllEqual(devices, [v.device for v in reduced_values])\n return [ops.convert_to_tensor(v) for v in reduced_values]\n\n per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)\n\n if \"eager\" in options.mode:\n got = reduce_fn()\n self.assertAllClose(got, per_replica_expect)\n\n if \"func_graph\" in options.mode:\n got = def_function.function(reduce_fn)()\n self.assertAllClose(got, per_replica_expect)\n\n get_global_mpr(options.num_processes).run(replica_fn)\n\n def batch_reduce_and_verify(self, inputs, expect, options):\n \"\"\"Batch reduce the given `inputs` and verify the output matches `expect`.\n\n Args:\n inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th\n value will be fed to i-th replica.\n expect: a list of `Tensor` or `IndexedSlices`. This should be the expected\n value for one replica.\n options: a `RunOpotions` instance.\n \"\"\"\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n options.prefer_unique_instance_key)\n collective, devices, pid = self.make_collective(options.num_processes,\n options.gpus_per_process)\n\n def batch_reduce_fn():\n batch_size = len(inputs[0])\n value_dst_pairs = []\n for i in range(batch_size):\n\n def value_fn(device_idx, idx=i):\n return inputs[pid * len(devices) + device_idx][idx]\n\n per_replica_value = make_per_replica_value(value_fn, devices)\n value_dst_pairs.append((per_replica_value, per_replica_value))\n reduced_values = collective.batch_reduce(options.reduce_op,\n value_dst_pairs,\n options.communication_options)\n reduced_values = [self.as_list(v) for v in reduced_values]\n for v in reduced_values:\n self.assertAllEqual(devices, [t.device for t in v])\n return nest.map_structure(ops.convert_to_tensor, reduced_values)\n\n per_replica_expect = nest.map_structure(\n lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)\n\n if \"eager\" in options.mode:\n got = batch_reduce_fn()\n self.assertAllClose(got, per_replica_expect)\n\n if \"func_graph\" in options.mode:\n got = def_function.function(batch_reduce_fn)()\n self.assertAllClose(got, per_replica_expect)\n\n get_global_mpr(options.num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],\n prefer_unique_instance_key=[True, False]))\n def testReduceDense(self, num_processes, required_gpus, implementation,\n reduce_op, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. 
NCCL requires \"\n \"physical GPUs for every process.\")\n options = self.RunOptions(\n num_processes=num_processes,\n gpus_per_process=required_gpus,\n reduce_op=reduce_op,\n communication_options=collective_util.Options(\n implementation=implementation),\n prefer_unique_instance_key=prefer_unique_instance_key)\n group_size = options.num_processes * (options.gpus_per_process or 1)\n\n inputs_data = [1.0, 2.0, 3.0, 4.0]\n inputs = inputs_data[0:group_size]\n\n if group_size == 1:\n expect = 1.0\n if group_size == 2:\n expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5\n elif group_size == 4:\n expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5\n\n self.reduce_and_verify(inputs, expect, options)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n # TODO(b/166682130): add MEAN reduce once the bug is fixed.\n reduce_op=ReduceOp.SUM,\n prefer_unique_instance_key=[True, False]))\n def testReduceSparse(self, num_processes, required_gpus, implementation,\n reduce_op, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. NCCL requires \"\n \"physical GPUs for every process.\")\n options = self.RunOptions(\n mode=[\"func_graph\"], # Sparse reduce is not supported in eager.\n num_processes=num_processes,\n gpus_per_process=required_gpus,\n reduce_op=reduce_op,\n communication_options=collective_util.Options(\n implementation=implementation),\n prefer_unique_instance_key=prefer_unique_instance_key)\n group_size = options.num_processes * (options.gpus_per_process or 1)\n\n inputs_data = [\n IndexedSlicesValue(\n values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),\n ]\n inputs = inputs_data[0:group_size]\n\n if group_size == 1:\n expect = IndexedSlices(\n values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])\n elif group_size == 2:\n expect = IndexedSlices(\n values=[[1.], [2.], [3.], [4.]],\n indices=[0, 1, 1, 2],\n dense_shape=[10, 1])\n elif group_size == 4:\n expect = IndexedSlices(\n values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],\n indices=[0, 1, 1, 2, 7, 8, 3, 2],\n dense_shape=[10, 1])\n\n self.reduce_and_verify(inputs, expect, options)\n\n @combinations.generate(\n combinations.combine(prefer_unique_instance_key=[True, False]))\n def testReduceSparseVariableLength(self, prefer_unique_instance_key):\n # One device per process, 2 processes, 2 replicas in total.\n inputs = [\n IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),\n ]\n expect = IndexedSlices(\n values=[[1.], [2.], [3.], [4.]],\n indices=[0, 0, 1, 2],\n dense_shape=[10, 1])\n self.reduce_and_verify(\n inputs,\n expect,\n self.RunOptions(\n mode=[\"func_graph\"], # Sparse reduce is not supported in eager.\n num_processes=2,\n reduce_op=ReduceOp.SUM,\n prefer_unique_instance_key=prefer_unique_instance_key))\n\n @combinations.generate(\n 
combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],\n prefer_unique_instance_key=[True, False]))\n def testBatchReduceDense(self, num_processes, required_gpus, implementation,\n reduce_op, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. NCCL requires \"\n \"physical GPUs for every process.\")\n\n options = self.RunOptions(\n num_processes=num_processes,\n gpus_per_process=required_gpus,\n reduce_op=reduce_op,\n communication_options=collective_util.Options(\n implementation=implementation),\n prefer_unique_instance_key=prefer_unique_instance_key)\n group_size = options.num_processes * (options.gpus_per_process or 1)\n\n inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]\n inputs = inputs_data[0:group_size]\n\n if group_size == 1:\n expect = [1.0, 2.0]\n if group_size == 2:\n expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]\n elif group_size == 4:\n expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]\n\n self.batch_reduce_and_verify(inputs, expect, options)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n # TODO(b/166682130): add MEAN reduce once the bug is fixed.\n reduce_op=ReduceOp.SUM,\n prefer_unique_instance_key=[True, False]))\n def testBatchReduceSparse(self, num_processes, required_gpus, implementation,\n reduce_op, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. 
NCCL requires \"\n \"physical GPUs for every process.\")\n\n options = self.RunOptions(\n mode=[\"func_graph\"], # Sparse reduce is not supported in eager.\n num_processes=num_processes,\n gpus_per_process=required_gpus,\n reduce_op=reduce_op,\n communication_options=collective_util.Options(\n implementation=implementation),\n prefer_unique_instance_key=prefer_unique_instance_key)\n group_size = options.num_processes * (options.gpus_per_process or 1)\n\n inputs_data = ([\n IndexedSlicesValue(\n values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])\n ], [\n IndexedSlicesValue(\n values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])\n ], [\n IndexedSlicesValue(\n values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])\n ], [\n IndexedSlicesValue(\n values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])\n ])\n inputs = inputs_data[0:group_size]\n\n if group_size == 1:\n expect = [\n IndexedSlices(\n values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),\n IndexedSlicesValue(\n values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])\n ]\n if group_size == 2:\n expect = [\n IndexedSlices(\n values=[[1.], [2.], [5.], [6.]],\n indices=[0, 1, 1, 2],\n dense_shape=[10, 1]),\n IndexedSlices(\n values=[[3.], [4.], [7.], [8.]],\n indices=[1, 2, 3, 4],\n dense_shape=[5, 1])\n ]\n elif group_size == 4:\n expect = [\n IndexedSlices(\n values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],\n indices=[0, 1, 1, 2, 3, 4, 8, 9],\n dense_shape=[10, 1]),\n IndexedSlices(\n values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],\n indices=[1, 2, 0, 1, 3, 4, 3, 4],\n dense_shape=[5, 2])\n ]\n self.batch_reduce_and_verify(inputs, expect, options)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],\n ))\n def testAllReduceDense(self, num_processes, required_gpus, implementation,\n reduce_op):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. 
NCCL requires \"\n \"physical GPUs for every process.\")\n\n def replica_fn():\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n group_size = num_processes * (required_gpus or 1)\n\n @def_function.function\n def collective_all_reduce():\n results = []\n for replica_id, device in enumerate(devices):\n with ops.device(device):\n value = constant_op.constant(1.0)\n results.append(\n collective._all_reduce(reduce_op, value, replica_id, options))\n return results\n\n got = collective_all_reduce()\n if reduce_op == ReduceOp.SUM:\n expect = [1.0 * group_size] * len(devices)\n elif reduce_op == ReduceOp.MEAN:\n expect = [1.0] * len(devices)\n self.assertAllClose(got, expect)\n\n @def_function.function\n def collective_batch_all_reduce():\n results = []\n for replica_id, device in enumerate(devices):\n with ops.device(device):\n value = (constant_op.constant(1.0), constant_op.constant(2.0))\n results.append(\n collective._all_reduce(reduce_op, value, replica_id, options))\n return results\n\n got = collective_batch_all_reduce()\n if reduce_op == ReduceOp.SUM:\n expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)\n elif reduce_op == ReduceOp.MEAN:\n expect = [(1.0, 2.0)] * len(devices)\n self.assertAllClose(got, expect)\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],\n ))\n def testAllReduceSparse(self, num_processes, required_gpus, implementation,\n reduce_op):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n if (num_processes == 2 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip NCCL + 2 processes combination. NCCL requires \"\n \"physical GPUs for every process.\")\n\n def replica_fn():\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n group_size = num_processes * (required_gpus or 1)\n\n @def_function.function\n def collective_all_reduce():\n results = []\n for replica_id, device in enumerate(devices):\n with ops.device(device):\n value = IndexedSlices(\n values=array_ops.identity([[1.]]),\n indices=array_ops.identity([0]),\n dense_shape=array_ops.identity([5, 1]))\n results.append(\n collective._all_reduce(reduce_op, value, replica_id, options))\n return results\n\n got = collective_all_reduce()\n if reduce_op == ReduceOp.SUM:\n expect = [IndexedSlices([[1. 
* group_size]], [0], [5, 1])\n ] * len(devices)\n elif reduce_op == ReduceOp.MEAN:\n expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)\n self.assertAllClose(\n nest.map_structure(ops.convert_to_tensor, got),\n nest.map_structure(ops.convert_to_tensor, expect))\n\n @def_function.function\n def collective_batch_all_reduce():\n results = []\n for replica_id, device in enumerate(devices):\n with ops.device(device):\n value = (IndexedSlices(\n array_ops.identity([[1.]]), array_ops.identity([0]),\n array_ops.identity([5, 1])),\n IndexedSlices(\n array_ops.identity([[3.]]), array_ops.identity([2]),\n array_ops.identity([5, 1])))\n results.append(\n collective._all_reduce(reduce_op, value, replica_id, options))\n return results\n\n got = collective_batch_all_reduce()\n if reduce_op == ReduceOp.SUM:\n expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),\n IndexedSlices([[3. * group_size]], [2], [5, 1]))\n ] * len(devices)\n elif reduce_op == ReduceOp.MEAN:\n expect = [(IndexedSlices([[1.]], [0], [5, 1]),\n IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)\n self.assertAllClose(\n nest.map_structure(ops.convert_to_tensor, got),\n nest.map_structure(ops.convert_to_tensor, expect))\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=2,\n required_gpus=0,\n implementation=CommunicationImplementation.AUTO,\n reduce_op=ReduceOp.SUM))\n def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,\n implementation, reduce_op):\n\n def replica_fn():\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n group_size = num_processes * (required_gpus or 1)\n\n @def_function.function\n def collective_batch_all_reduce():\n results = []\n for replica_id, device in enumerate(devices):\n with ops.device(device):\n value = (IndexedSlices(\n array_ops.identity([[1.]]), array_ops.identity([0]),\n array_ops.identity([5, 1])), array_ops.identity(1.0),\n IndexedSlices(\n array_ops.identity([[3.]]), array_ops.identity([2]),\n array_ops.identity([5, 1])), array_ops.identity(2.0))\n results.append(\n collective._all_reduce(reduce_op, value, replica_id, options))\n return results\n\n got = collective_batch_all_reduce()\n expect = [\n (IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,\n IndexedSlices([[3. 
* group_size]], [2], [5, 1]), 2.0 * group_size)\n ] * len(devices)\n self.assertAllClose(\n nest.map_structure(ops.convert_to_tensor, got),\n nest.map_structure(ops.convert_to_tensor, expect))\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n axis=[0, 1, 2],\n func_mode=[\"eager\", \"func_graph\"],\n implementation=[\n CommunicationImplementation.AUTO,\n CommunicationImplementation.RING,\n CommunicationImplementation.NCCL,\n ],\n prefer_unique_instance_key=[True, False]))\n def testAllGatherSameShape(self, num_processes, required_gpus, implementation,\n func_mode, axis, prefer_unique_instance_key):\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)\n\n def gather_fn():\n per_replica_value = make_per_replica_value(value, devices)\n gathered_values = collective._gather(\n per_replica_value, per_replica_value, axis=axis, options=options)\n gathered_values = self.as_list(gathered_values)\n # Skip checking devices in eager. In eager the device attribute doesn't\n # reflect the actual device of the tensor.\n if not context.executing_eagerly():\n self.assertAllEqual(devices, [v.device for v in gathered_values])\n return [ops.convert_to_tensor(v) for v in gathered_values]\n\n group_size = num_processes * (required_gpus or 1)\n expect = array_ops.concat([value] * group_size, axis=axis)\n per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)\n\n if func_mode == \"eager\":\n result = gather_fn()\n self.assertAllClose(result, per_replica_expect)\n\n if func_mode == \"func_graph\":\n result = def_function.function(gather_fn)()\n self.assertAllClose(result, per_replica_expect)\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=[1, 2],\n required_gpus=[0, 1, 2],\n implementation=[CommunicationImplementation.RING]))\n def testCollectiveV2ControlFlow(self, num_processes, required_gpus,\n implementation):\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = True\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n value = make_per_replica_value(constant_op.constant([1.]), devices)\n\n @def_function.function\n def reduce_fn():\n\n def cond_body():\n reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,\n options)\n return math_ops.add_n(self.as_list(reduced)) / len(devices)\n\n return control_flow_ops.cond(\n array_ops.identity(False), cond_body, cond_body)\n\n num_replicas = num_processes * len(devices)\n self.assertAllEqual(reduce_fn(), [1. 
* num_replicas])\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=1,\n required_gpus=2,\n implementation=[\n CommunicationImplementation.NCCL, CommunicationImplementation.RING\n ],\n prefer_unique_instance_key=[True, False]))\n def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes,\n required_gpus,\n implementation,\n prefer_unique_instance_key):\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n\n # We would like to simulate the following sequence:\n # thread-0 device0 device1\n # thread-1 device0 device1\n # If the kernel launch sequence is as-is the program will deadlock since\n # NCCL requires the launch order to be same on each device.\n v0 = make_per_replica_value(1.0, devices)\n v1 = make_per_replica_value(2.0, devices)\n\n # Add a delay to collective_ops.all_reduce according to the input tensors\n # index in `sequence.`\n sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]\n all_reduce = collective_ops.all_reduce\n\n def delayed_all_reduce(input_tensor, *args, **kwargs):\n for idx, v in enumerate(sequence):\n if input_tensor is v:\n time.sleep(idx)\n break\n return all_reduce(input_tensor, *args, **kwargs)\n\n with test.mock.patch.object(collective_ops, \"all_reduce\",\n delayed_all_reduce):\n # We only use NCCL for batch reduce with two or more values, so we use\n # two values here.\n\n def thread_fn():\n reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM,\n [(v0, v0), (v0, v0)], options)\n self.assertAllEqual(reduced[0].values, [2.0, 2.0])\n self.assertAllEqual(reduced[1].values, [2.0, 2.0])\n\n t = threading.Thread(target=thread_fn)\n t.start()\n reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),\n (v1, v1)],\n options)\n self.assertAllEqual(reduced[0].values, [4.0, 4.0])\n self.assertAllEqual(reduced[1].values, [4.0, 4.0])\n t.join()\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=1,\n required_gpus=2,\n implementation=[\n CommunicationImplementation.NCCL, CommunicationImplementation.RING\n ],\n prefer_unique_instance_key=[True, False]))\n def testInputsAreFunctionArgs(self, num_processes, required_gpus,\n implementation, prefer_unique_instance_key):\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(implementation=implementation)\n\n @def_function.function\n def reduce_fn(v):\n # Function inputs don't have device placement.\n self.assertEqual(v.values[0].device, \"\")\n self.assertEqual(v.values[1].device, \"\")\n # We only use NCCL for batch reduce with two or more values, so we use\n # two values here.\n reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v),\n (v, v)],\n options)\n self.assertEqual(reduced[0].values[0].device, devices[0])\n self.assertEqual(reduced[0].values[1].device, devices[1])\n self.assertEqual(reduced[1].values[0].device, devices[0])\n self.assertEqual(reduced[1].values[1].device, devices[1])\n # Returning Mirrored only evaluates the primary value, which causes\n # hanging,\n return [reduced[0].values, reduced[1].values]\n\n v = make_per_replica_value(1.0, 
devices)\n reduced = reduce_fn(v)\n self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]])\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=2,\n required_gpus=[0, 1],\n implementation=[\n CommunicationImplementation.RING, CommunicationImplementation.NCCL\n ],\n prefer_unique_instance_key=[True, False]))\n def testTimeoutReduceDense(self, num_processes, implementation, required_gpus,\n prefer_unique_instance_key):\n\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, task_id = self.make_collective(\n num_processes, required_gpus)\n if task_id != 0:\n return\n\n v = make_per_replica_value(1.0, devices)\n options = collective_util.Options(\n timeout_seconds=1, implementation=implementation)\n\n @def_function.function\n def reduce_dense():\n return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)\n\n # The collective should time out because we only launch it on worker-0,\n # while there're three workers in total.\n with self.assertRaises(errors.DeadlineExceededError):\n reduce_dense()\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=2,\n required_gpus=[0, 1],\n implementation=[\n CommunicationImplementation.RING, CommunicationImplementation.NCCL\n ],\n prefer_unique_instance_key=[True, False]))\n def testTimeoutBatchReduceDense(self, num_processes, implementation,\n required_gpus, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, task_id = self.make_collective(\n num_processes, required_gpus)\n if task_id != 0:\n return\n\n v = make_per_replica_value(1.0, devices)\n options = collective_util.Options(\n timeout_seconds=1, implementation=implementation)\n\n @def_function.function\n def batch_reduce_dense():\n return collective.batch_reduce(reduce_util.ReduceOp.SUM,\n [(v, v), (v, v)], options)\n\n # The collective should time out because we only launch it on worker-0,\n # while there're two workers in total.\n with self.assertRaises(errors.DeadlineExceededError):\n batch_reduce_dense()\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=2,\n required_gpus=[0, 1],\n implementation=[\n CommunicationImplementation.RING, CommunicationImplementation.NCCL\n ],\n prefer_unique_instance_key=[True, False]))\n def testTimeoutReduceSparse(self, num_processes, implementation,\n required_gpus, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, task_id = self.make_collective(\n num_processes, required_gpus)\n if task_id != 0:\n return\n\n v = make_per_replica_value(\n IndexedSlicesValue(\n values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)\n options = collective_util.Options(\n timeout_seconds=1, implementation=implementation)\n\n @def_function.function\n def reduce_sparse():\n return 
collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)\n\n # The collective should time out because we only launch it on worker-0,\n # while there're two workers in total.\n with self.assertRaises(errors.DeadlineExceededError):\n reduce_sparse()\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(\n combinations.combine(\n num_processes=2,\n required_gpus=[0, 1],\n implementation=[\n CommunicationImplementation.RING, CommunicationImplementation.NCCL\n ],\n prefer_unique_instance_key=[True, False]))\n def testTimeoutBatchReduceSparse(self, num_processes, required_gpus,\n implementation, prefer_unique_instance_key):\n if (required_gpus == 0 and\n implementation == CommunicationImplementation.NCCL):\n self.skipTest(\"Skip CPU + NCCL combination\")\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = (\n prefer_unique_instance_key)\n collective, devices, task_id = self.make_collective(\n num_processes, required_gpus)\n if task_id != 0:\n return\n\n v = make_per_replica_value(\n IndexedSlicesValue(\n values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)\n options = collective_util.Options(\n timeout_seconds=1, implementation=implementation)\n\n @def_function.function\n def batch_reduce_sparse():\n return collective.batch_reduce(reduce_util.ReduceOp.SUM,\n [(v, v), (v, v)], options)\n\n # The collective should time out because we only launch it on worker-0,\n # while there're two workers in total.\n with self.assertRaises(errors.DeadlineExceededError):\n batch_reduce_sparse()\n\n get_global_mpr(num_processes).run(replica_fn)\n\n @combinations.generate(combinations.combine(num_processes=1, required_gpus=2))\n def testNcclOrdering(self, num_processes, required_gpus):\n\n def replica_fn():\n CollectiveReplicaLauncher._prefer_unique_instance_key = True\n CollectiveReplicaLauncher._prefer_ordering_token = True\n collective, devices, _ = self.make_collective(num_processes,\n required_gpus)\n options = collective_util.Options(\n implementation=CommunicationImplementation.NCCL)\n\n v_dense = make_per_replica_value([1.0, 1.0], devices)\n v_sparse = make_per_replica_value([\n IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),\n IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),\n ], devices)\n\n @def_function.function\n def nested_dense():\n collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)\n\n @def_function.function\n def nested_sparse():\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)\n\n # All collectives, function calls, if clause and while loops should be\n # chained by control dependencies, so that the execution order is\n # deterministic.\n @def_function.function\n def f():\n # pylint: disable=pointless-statement\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)\n # reducing dense value.\n collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)\n # reducing sparse value.\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)\n # reduce dense value in nested tf.function.\n nested_dense()\n # reduce sparse value in nested tf.function.\n nested_sparse()\n # reduce dense value in tf.cond.\n if array_ops.identity(1.0) > array_ops.identity(2.0):\n collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)\n else:\n v_dense\n # reduce sparse value in tf.cond.\n if array_ops.identity(1.0) > array_ops.identity(2.0):\n v_sparse\n else:\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,\n options)\n 
# reduce dense value in tf.while_loop.\n i = array_ops.identity(1)\n while i < 3:\n collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)\n i += 1\n # reduce sparse value in tf.while_loop.\n i = array_ops.identity(1)\n while i < 3:\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,\n options)\n i += 1\n # reducing dense and sparse value again.\n collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)\n collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)\n # pylint: enable=pointless-statement\n\n graph = f.get_concrete_function().graph\n should_be_ordered = set([\n \"CollectiveReduceV2\", \"CollectiveGatherV2\", \"If\", \"While\",\n \"StatefulPartitionedCall\"\n ])\n nodes_by_device = {}\n for op in graph.get_operations():\n if op.type in should_be_ordered:\n if op.device not in nodes_by_device:\n nodes_by_device[op.device] = []\n nodes_by_device[op.device].append(op)\n order = test_util.topological_sort_operations(graph.get_operations())\n for device in devices:\n device = device_util.canonicalize(device)\n # Those function ops don't have device annotations, but they contain\n # collectives for both devices so we always include them.\n operations = nodes_by_device[device] + nodes_by_device[\"\"]\n # Verify that we get all types of nodes we want.\n self.assertEqual(set(op.type for op in operations), should_be_ordered)\n test_util.assert_sequential_execution(order, operations)\n\n get_global_mpr(num_processes).run(replica_fn)\n\n\nif __name__ == \"__main__\":\n # Set default inter op thread pool size to one to ensure we don't exhaust the\n # thread pool with the additional executors to run collectives in eager.\n os.environ[\"TF_NUM_INTEROP_THREADS\"] = \"1\"\n # TODO(b/172304955): figure why logical devices doesn't work.\n test_util.main(config_logical_devices=False)\n"
] |
[
[
"tensorflow.python.distribute.test_util.assert_sequential_execution",
"tensorflow.python.distribute.multi_process_runner.MultiProcessPoolRunner",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.multi_worker_test_base.create_cluster_spec",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.distribute.test_util.main",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.distribute.collective_util.Options",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.eager.test.mock.patch.object",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
]
] |
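The test file above forces a specific cross-thread kernel launch order by monkey-patching collective_ops.all_reduce so that selected input tensors sleep before the real op runs. Below is a minimal, self-contained sketch of that mock-patch pattern; the helper name and the `mylib` module are hypothetical, not code from the file above:

    import time
    from unittest import mock

    def delayed_by_position(sequence, fn):
        # Sleep `idx` seconds when the first argument is (identically)
        # the idx-th element of `sequence`, then call the wrapped function.
        def wrapper(x, *args, **kwargs):
            for idx, v in enumerate(sequence):
                if x is v:
                    time.sleep(idx)
                    break
            return fn(x, *args, **kwargs)
        return wrapper

    # Hypothetical usage against some module `mylib` exposing `all_reduce`:
    # with mock.patch.object(mylib, "all_reduce",
    #                        delayed_by_position([t0, t1], mylib.all_reduce)):
    #     run_threads()

Because the delay is keyed on tensor identity (`is`), the patched function can reorder otherwise-identical calls, which is what lets the test check that collectives are launched in the same order on every device.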
liangshi7/Ax
|
[
"f20d10f619aae504f2e4509b5a786842a6c72e89"
] |
[
"ax/storage/json_store/decoder.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport datetime\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom inspect import isclass\nfrom typing import Any, Dict, List, Type\n\nimport pandas as pd\nfrom ax.core.base_trial import BaseTrial\nfrom ax.core.data import Data # noqa F401\nfrom ax.core.experiment import Experiment\nfrom ax.core.generator_run import GeneratorRun\nfrom ax.core.parameter import Parameter\nfrom ax.core.parameter_constraint import (\n OrderConstraint,\n ParameterConstraint,\n SumConstraint,\n)\nfrom ax.core.search_space import SearchSpace\nfrom ax.core.simple_experiment import (\n SimpleExperiment,\n unimplemented_evaluation_function,\n)\nfrom ax.exceptions.storage import JSONDecodeError\nfrom ax.modelbridge.generation_strategy import GenerationStrategy\nfrom ax.modelbridge.registry import Models\nfrom ax.modelbridge.transforms.base import Transform\nfrom ax.storage.json_store.decoders import batch_trial_from_json, trial_from_json\nfrom ax.storage.json_store.registry import DECODER_REGISTRY\nfrom ax.storage.transform_registry import REVERSE_TRANSFORM_REGISTRY\n\n\ndef object_from_json(object_json: Any) -> Any:\n \"\"\"Recursively load objects from a JSON-serializable dictionary.\"\"\"\n if type(object_json) in (str, int, float, bool, type(None)) or isinstance(\n object_json, Enum\n ):\n return object_json\n elif isinstance(object_json, list):\n return [object_from_json(i) for i in object_json]\n elif isinstance(object_json, tuple):\n return tuple(object_from_json(i) for i in object_json)\n elif isinstance(object_json, dict):\n if \"__type\" not in object_json:\n # this is just a regular dictionary, e.g. the one in Parameter\n # containing parameterizations\n return {k: object_from_json(v) for k, v in object_json.items()}\n\n _type = object_json.pop(\"__type\")\n\n if _type == \"datetime\":\n return datetime.datetime.strptime(\n object_json[\"value\"], \"%Y-%m-%d %H:%M:%S.%f\"\n )\n elif _type == \"OrderedDict\":\n return OrderedDict(\n [(k, object_from_json(v)) for k, v in object_json[\"value\"]]\n )\n elif _type == \"DataFrame\":\n # Need dtype=False, otherwise infers arm_names like \"4_1\"\n # should be int 41\n return pd.read_json(object_json[\"value\"], dtype=False)\n elif _type not in DECODER_REGISTRY:\n err = (\n f\"The JSON dictionary passed to `object_from_json` has a type \"\n f\"{_type} that is not registered with a corresponding class in \"\n f\"DECODER_REGISTRY.\"\n )\n raise JSONDecodeError(err)\n\n _class = DECODER_REGISTRY[_type]\n\n if isclass(_class) and issubclass(_class, Enum):\n # to access enum members by name, use item access\n return _class[object_json[\"name\"]]\n elif _class == GeneratorRun:\n return generator_run_from_json(object_json=object_json)\n elif _class == SimpleExperiment:\n return simple_experiment_from_json(object_json=object_json)\n elif _class == Experiment:\n return experiment_from_json(object_json=object_json)\n elif _class == SearchSpace:\n return search_space_from_json(search_space_json=object_json)\n elif _class == Type[Transform]:\n return transform_type_from_json(object_json=object_json)\n\n return _class(**{k: object_from_json(v) for k, v in object_json.items()})\n else:\n err = (\n f\"The object {object_json} passed to `object_from_json` has an \"\n f\"unsupported type: {type(object_json)}.\"\n )\n raise JSONDecodeError(err)\n\n\ndef generator_run_from_json(object_json: Dict[str, Any]) -> GeneratorRun:\n \"\"\"Load Ax GeneratorRun from JSON.\"\"\"\n 
time_created_json = object_json.pop(\"time_created\")\n type_json = object_json.pop(\"generator_run_type\")\n index_json = object_json.pop(\"index\")\n generator_run = GeneratorRun(\n **{k: object_from_json(v) for k, v in object_json.items()}\n )\n generator_run._time_created = object_from_json(time_created_json)\n generator_run._generator_run_type = object_from_json(type_json)\n generator_run._index = object_from_json(index_json)\n return generator_run\n\n\ndef search_space_from_json(search_space_json: Dict[str, Any]) -> SearchSpace:\n \"\"\"Load a SearchSpace from JSON.\n\n This function is necessary due to the coupled loading of SearchSpace\n and parameter constraints.\n \"\"\"\n parameters = object_from_json(search_space_json.pop(\"parameters\"))\n json_param_constraints = search_space_json.pop(\"parameter_constraints\")\n return SearchSpace(\n parameters=parameters,\n parameter_constraints=parameter_constraints_from_json(\n parameter_constraint_json=json_param_constraints, parameters=parameters\n ),\n )\n\n\ndef parameter_constraints_from_json(\n parameter_constraint_json: List[Dict[str, Any]], parameters: List[Parameter]\n) -> List[ParameterConstraint]:\n \"\"\"Load ParameterConstraints from JSON.\n\n Order and SumConstraint are tied to a search space,\n and require that SearchSpace's parameters to be passed in for decoding.\n\n Args:\n parameter_constraint_json: JSON representation of parameter constraints.\n parameters: Parameter definitions for decoding via parameter names.\n\n Returns:\n parameter_constraints: Python classes for parameter constraints.\n \"\"\"\n parameter_constraints = []\n parameter_map = {p.name: p for p in parameters}\n for constraint in parameter_constraint_json:\n if constraint[\"__type\"] == \"OrderConstraint\":\n lower_parameter = parameter_map[constraint[\"lower_name\"]]\n upper_parameter = parameter_map[constraint[\"upper_name\"]]\n parameter_constraints.append(\n OrderConstraint(\n lower_parameter=lower_parameter, upper_parameter=upper_parameter\n )\n )\n elif constraint[\"__type\"] == \"SumConstraint\":\n parameters = [parameter_map[name] for name in constraint[\"parameter_names\"]]\n parameter_constraints.append(\n SumConstraint(\n parameters=parameters,\n is_upper_bound=constraint[\"is_upper_bound\"],\n bound=constraint[\"bound\"],\n )\n )\n else:\n parameter_constraints.append(object_from_json(constraint))\n return parameter_constraints\n\n\ndef trials_from_json(\n experiment: Experiment, trials_json: Dict[str, Any]\n) -> Dict[int, BaseTrial]:\n \"\"\"Load Ax Trials from JSON.\"\"\"\n loaded_trials = {}\n for index, batch_json in trials_json.items():\n is_trial = batch_json[\"__type\"] == \"Trial\"\n batch_json = {\n k: object_from_json(v) for k, v in batch_json.items() if k != \"__type\"\n }\n loaded_trials[int(index)] = (\n trial_from_json(experiment=experiment, **batch_json)\n if is_trial\n else batch_trial_from_json(experiment=experiment, **batch_json)\n )\n return loaded_trials\n\n\ndef data_from_json(\n data_by_trial_json: Dict[str, Any]\n) -> Dict[int, \"OrderedDict[int, Data]\"]:\n \"\"\"Load Ax Data from JSON.\"\"\"\n data_by_trial = object_from_json(data_by_trial_json)\n # hack necessary because Python's json module converts dictionary\n # keys to strings: https://stackoverflow.com/q/1450957\n return {\n int(k): OrderedDict({int(k2): v2 for k2, v2 in v.items()})\n for k, v in data_by_trial.items()\n }\n\n\ndef simple_experiment_from_json(object_json: Dict[str, Any]) -> SimpleExperiment:\n \"\"\"Load AE SimpleExperiment from JSON.\"\"\"\n 
time_created_json = object_json.pop(\"time_created\")\n trials_json = object_json.pop(\"trials\")\n experiment_type_json = object_json.pop(\"experiment_type\")\n data_by_trial_json = object_json.pop(\"data_by_trial\")\n description_json = object_json.pop(\"description\")\n is_test_json = object_json.pop(\"is_test\")\n optimization_config = object_from_json(object_json.pop(\"optimization_config\"))\n\n # not relevant to simple experiment\n del object_json[\"tracking_metrics\"]\n del object_json[\"runner\"]\n\n kwargs = {k: object_from_json(v) for k, v in object_json.items()}\n kwargs[\"evaluation_function\"] = unimplemented_evaluation_function\n kwargs[\"objective_name\"] = optimization_config.objective.metric.name\n kwargs[\"minimize\"] = optimization_config.objective.minimize\n kwargs[\"outcome_constraints\"] = optimization_config.outcome_constraints\n experiment = SimpleExperiment(**kwargs)\n\n experiment.description = object_from_json(description_json)\n experiment.is_test = object_from_json(is_test_json)\n experiment._time_created = object_from_json(time_created_json)\n experiment._trials = trials_from_json(experiment, trials_json)\n for trial in experiment._trials.values():\n for arm in trial.arms:\n experiment._arms_by_signature[arm.signature] = arm\n if experiment.status_quo is not None:\n # pyre-fixme[16]: Optional type has no attribute `signature`.\n sq_sig = experiment.status_quo.signature\n experiment._arms_by_signature[sq_sig] = experiment.status_quo\n experiment._experiment_type = object_from_json(experiment_type_json)\n experiment._data_by_trial = data_from_json(data_by_trial_json)\n return experiment\n\n\ndef experiment_from_json(object_json: Dict[str, Any]) -> Experiment:\n \"\"\"Load Ax Experiment from JSON.\"\"\"\n time_created_json = object_json.pop(\"time_created\")\n trials_json = object_json.pop(\"trials\")\n experiment_type_json = object_json.pop(\"experiment_type\")\n data_by_trial_json = object_json.pop(\"data_by_trial\")\n experiment = Experiment(**{k: object_from_json(v) for k, v in object_json.items()})\n experiment._time_created = object_from_json(time_created_json)\n experiment._trials = trials_from_json(experiment, trials_json)\n for trial in experiment._trials.values():\n for arm in trial.arms:\n experiment._arms_by_signature[arm.signature] = arm\n if experiment.status_quo is not None:\n # pyre-fixme[16]: Optional type has no attribute `signature`.\n sq_sig = experiment.status_quo.signature\n experiment._arms_by_signature[sq_sig] = experiment.status_quo\n experiment._experiment_type = object_from_json(experiment_type_json)\n experiment._data_by_trial = data_from_json(data_by_trial_json)\n return experiment\n\n\ndef transform_type_from_json(object_json: Dict[str, Any]) -> Type[Transform]:\n \"\"\"Load the transform type from JSON.\"\"\"\n index_in_registry = object_json.pop(\"index_in_registry\")\n if index_in_registry not in REVERSE_TRANSFORM_REGISTRY: # pragma: no cover\n raise ValueError(f\"Unknown transform '{object_json.pop('transform_type')}'\")\n return REVERSE_TRANSFORM_REGISTRY[index_in_registry]\n\n\ndef generation_strategy_from_json(\n generation_strategy_json: Dict[str, Any]\n) -> GenerationStrategy:\n \"\"\"Load generation strategy from JSON.\"\"\"\n steps = object_from_json(generation_strategy_json.pop(\"steps\"))\n gs = GenerationStrategy(steps=steps, name=generation_strategy_json.pop(\"name\"))\n gs._db_id = object_from_json(generation_strategy_json.pop(\"db_id\"))\n gs._experiment = 
object_from_json(generation_strategy_json.pop(\"experiment\"))\n gs._generated = generation_strategy_json.pop(\"generated\")\n gs._observed = generation_strategy_json.pop(\"observed\")\n gs._data = object_from_json(generation_strategy_json.pop(\"data\"))\n gs._curr = gs._steps[generation_strategy_json.pop(\"curr_index\")]\n gs._generator_runs = object_from_json(\n generation_strategy_json.pop(\"generator_runs\")\n )\n if generation_strategy_json.pop(\"had_initialized_model\"): # pragma: no cover\n # If model in the current step was not directly from the `Models` enum,\n # pass its type to `restore_model_from_generator_run`, which will then\n # attempt to use this type to recreate the model.\n if type(gs._curr.model) != Models:\n models_enum = type(gs._curr.model)\n assert issubclass(models_enum, Models)\n # pyre-ignore[6]: `models_enum` typing hackiness\n gs._restore_model_from_generator_run(models_enum=models_enum)\n return gs\n\n gs._restore_model_from_generator_run()\n return gs\n"
] |
[
[
"pandas.read_json"
]
] |
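The decoder above is organized around one recursive dispatch: plain JSON scalars pass through, containers recurse, and any dict carrying a "__type" key is routed to a registered constructor. A minimal sketch of that pattern follows, with a toy registry standing in for Ax's actual DECODER_REGISTRY:

    REGISTRY = {}  # e.g. {"Point": Point} for some hypothetical dataclass

    def from_json(obj):
        if isinstance(obj, (str, int, float, bool, type(None))):
            return obj
        if isinstance(obj, list):
            return [from_json(i) for i in obj]
        if isinstance(obj, dict):
            if "__type" not in obj:
                # plain dict: decode values, keep keys
                return {k: from_json(v) for k, v in obj.items()}
            cls = REGISTRY[obj.pop("__type")]  # dispatch on the type tag
            return cls(**{k: from_json(v) for k, v in obj.items()})
        raise TypeError(f"Unsupported JSON payload: {type(obj)}")

Note also the DataFrame branch in the real decoder: it passes dtype=False to pandas.read_json precisely so that string-like arm names such as "4_1" are not coerced into the integer 41.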
BlaineWD/mmf
|
[
"9c98e2b196fae75df702b6adab3468017c909266"
] |
[
"plotting_utils/generate_cross_entropy_plots.py"
] |
[
"import argparse\nimport os\nimport sys\nimport json\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nfrom tqdm import tqdm\n\n\noutput_path = 'experiment_plots'\n\nif not os.path.exists(output_path):\n os.makedirs(output_path)\n\ncross_entropy_key = 'hateful_memes/cross_entropy'\ntrain_cross_entropy = f'train/{cross_entropy_key}'\nvalidation_cross_entropy = f'val/{cross_entropy_key}'\ntest_cross_entropy = f'test/{cross_entropy_key}'\nlogistics_line_start = 'mmf.trainers.callbacks.logistics : {'\n\n\ndef get_metrics_from_logs(log_paths):\n metrics = {}\n train_epoch_step = None\n validation_epoch_step = None\n print(f'Reading in experiment logs...')\n for log_file in tqdm(log_paths):\n logistics_lines = []\n with open(log_file) as f:\n log_lines = f.readlines()\n metrics[log_file] = {\n 'train_cross_entropy': [], 'validation_cross_entropy': [], 'test_cross_entropy': []\n }\n\n for line in log_lines[2:]:\n if 'mmf.trainers.callbacks.logistics' in line:\n logistics_lines.append(line)\n\n for logistics_line in logistics_lines:\n if 'Finished run' in logistics_line:\n continue\n\n current_metrics_string = logistics_line[\n logistics_line.find(logistics_line_start) + len(logistics_line_start) - 1:]\n current_metrics = json.loads(current_metrics_string)\n progress = current_metrics['progress']\n\n if train_cross_entropy in current_metrics:\n if train_epoch_step is None:\n train_epoch_step = int(progress[:progress.find('/')])\n metrics[log_file]['train_cross_entropy'].append(float(current_metrics[train_cross_entropy]))\n elif validation_cross_entropy in current_metrics:\n if validation_epoch_step is None:\n validation_epoch_step = int(progress[:progress.find('/')])\n metrics[log_file]['validation_cross_entropy'].append(\n float(current_metrics[validation_cross_entropy]))\n elif test_cross_entropy in current_metrics:\n metrics[log_file]['test_cross_entropy'].append(float(current_metrics[test_cross_entropy]))\n\n print('\\nFound metrics:')\n print(metrics)\n print(f'\\nTrain step: {train_epoch_step}')\n print(f'\\nValidation step: {validation_epoch_step}')\n return metrics, train_epoch_step, validation_epoch_step\n\n\ndef write_plots(metrics, metric_type, model_name, epoch_step, output_path):\n figure(figsize=(10, 8))\n x_axis_name = 'Iteration'\n print(f'\\nWriting {metric_type} plots to {output_path}...')\n metric_config_names = metrics.keys()\n\n plt.title(f'{metric_type.title()} Cross Entropy Loss across Augmentation levels for {model_name}')\n plt.xlabel(x_axis_name)\n plt.ylabel('Cross Entropy Loss')\n\n for key in tqdm(metric_config_names):\n cross_entropy_metrics = metrics[key][f'{metric_type}_cross_entropy']\n epoch_increments = [(i + 1) * epoch_step for i in range(len(cross_entropy_metrics))]\n plt.plot(epoch_increments, cross_entropy_metrics)\n\n augmentation_levels = [\n \"unmodified\",\n \"25% of images transformed\",\n \"50% of images transformed\",\n \"75% of images transformed\",\n \"100% of images transformed\"\n ]\n plt.legend(augmentation_levels, ncol=2, loc='best')\n plt.savefig(os.path.join(output_path, f'{metric_type}-{model_name.replace(\" \", \"-\").lower()}-cross-entropy.png'))\n plt.clf()\n\nvisual_bert_input_paths = [\n os.path.join('experiment_logs_unaugmented', 'train_2021_07_30T06_16_25.log'),\n os.path.join('experiment_logs_0.25_augmented', 'train_2021_08_01T07_29_23.log'),\n os.path.join('experiment_logs_0.5_augmented', 'train_2021_08_01T02_07_12.log'),\n os.path.join('experiment_logs_0.75_augmented', 'train_2021_08_01T16_22_49.log'),\n 
os.path.join('experiment_logs_augmented', 'train_2021_07_31T13_23_55.log')\n]\n\nmetrics, train_epoch_step, validation_epoch_step = get_metrics_from_logs(visual_bert_input_paths)\nwrite_plots(metrics, 'train', 'Visual BERT', train_epoch_step, output_path)\nwrite_plots(metrics, 'validation', 'Visual BERT', validation_epoch_step, output_path)\n\nvisual_bert_with_coco_input_paths = [\n os.path.join('experiment_logs_unaugmented', 'train_2021_07_30T07_07_02.log'),\n os.path.join('experiment_logs_0.25_augmented', 'train_2021_08_01T08_44_50.log'),\n os.path.join('experiment_logs_0.5_augmented', 'train_2021_08_01T03_22_25.log'),\n os.path.join('experiment_logs_0.75_augmented', 'train_2021_08_01T17_38_07.log'),\n os.path.join('experiment_logs_augmented', 'train_2021_07_31T14_39_11.log')\n]\n\nmetrics, train_epoch_step, validation_epoch_step = get_metrics_from_logs(visual_bert_with_coco_input_paths)\nwrite_plots(metrics, 'train', 'Visual BERT COCO', train_epoch_step, output_path)\nwrite_plots(metrics, 'validation', 'Visual BERT COCO', validation_epoch_step, output_path)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
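The log-scraping step in the script above hinges on one trick: find where the logistics dict begins in a log line and hand everything from the opening brace onward to json.loads. A minimal sketch with a hypothetical log line (the marker and metric key mirror the script):

    import json

    MARKER = 'logistics : {'

    def metrics_from_line(line):
        start = line.find(MARKER)
        if start == -1:
            return None
        # Keep the '{' itself: step len(MARKER) - 1 past the match start.
        return json.loads(line[start + len(MARKER) - 1:])

    line = ('2021-07-30 mmf.trainers.callbacks.logistics : '
            '{"progress": "100/3000", "train/hateful_memes/cross_entropy": "0.69"}')
    print(metrics_from_line(line)["progress"])  # -> 100/3000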
DanitYanowsky/IML.HUJI
|
[
"391b661ede3fdbb72ecdf900c32df69445b3868b"
] |
[
"IMLearn/learners/classifiers/gaussian_naive_bayes.py"
] |
[
"from typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom IMLearn.learners import MultivariateGaussian\n\n\nclass GaussianNaiveBayes(BaseEstimator):\n \"\"\"\n Gaussian Naive-Bayes classifier\n \"\"\"\n def __init__(self):\n \"\"\"\n Instantiate a Gaussian Naive Bayes classifier\n\n Attributes\n ----------\n self.classes_ : np.ndarray of shape (n_classes,)\n The different labels classes. To be set in `GaussianNaiveBayes.fit`\n\n self.mu_ : np.ndarray of shape (n_classes,n_features)\n The estimated features means for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.vars_ : np.ndarray of shape (n_classes, n_features)\n The estimated features variances for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.pi_: np.ndarray of shape (n_classes)\n The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`\n \"\"\"\n super().__init__()\n self.multi_k_array_ = None\n self.classes_, self.mu_, self.vars_, self.pi_ = None, None, None, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n fits a gaussian naive bayes model\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n self.classes_, self.pi_=np.unique(y, return_counts=True)\n m=X.shape[0]\n self.pi_ = self.pi_ / m\n _X = np.column_stack((X, y))\n K=self.classes_.shape[0]\n if len(X.shape)==1:\n d = 1\n else:\n d=X.shape[1]\n self.mu_=np.zeros((K,d))\n self.vars_=np.zeros((K,d))\n self.multi_k_array = []\n \n for k in range(K):\n k_multi=MultivariateGaussian()\n X_k = _X[_X[:, -1]==self.classes_[k]] ##Data specific to k\n k_multi.fit(X_k[:,:-1])\n self.mu_[k,:] = k_multi.mu_ ##insert mu_k to the array\n self.vars_[k,:] = np.var(X_k[:,:-1], axis=0)\n self.multi_k_array.append(k_multi) ##insert the instance to the array \n self.fitted_=True\n \n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n return np.argmax(self.likelihood(X), axis=1) ##axis=1 is max on each line\n\n def likelihood(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate the likelihood of a given data over the estimated model\n\n Parameters\n ----------\n X : np.ndarray of shape (n_samples, n_features)\n Input data to calculate its likelihood over the different classes.\n\n Returns\n -------\n likelihoods : np.ndarray of shape (n_samples, n_classes)\n The likelihood for each sample under each of the classes\n\n \"\"\"\n if not self.fitted_:\n raise ValueError(\"Estimator must first be fitted before calling `likelihood` function\")\n m_samples = X.shape[0]\n k_classes = self.classes_.shape[0]\n likelihoods = np.zeros((m_samples, k_classes))\n for k in range(k_classes): ##for every class calculate pdf\n exp_value = -((X-self.mu_[k])**2)/(2*self.vars_[k])\n const = -0.5*np.log(2 * np.pi * self.vars_[k])\n value_to_sum =const+exp_value\n likelihoods[:, k] = np.sum(value_to_sum, 1) +np.log(self.pi_[k])\n return likelihoods\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of 
shape (n_samples, )\n            True labels of test samples\n\n        Returns\n        -------\n        loss : float\n            Performance under misclassification loss function\n        \"\"\"\n        from ...metrics import misclassification_error\n        return misclassification_error(y, self.predict(X))\n"
] |
[
[
"numpy.log",
"numpy.unique",
"numpy.column_stack",
"numpy.var",
"numpy.zeros",
"numpy.sum"
]
] |
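The `likelihood` method above evaluates, for each sample x and class k, the naive-Bayes log-joint sum_d [ -0.5*log(2*pi*vars_kd) - (x_d - mu_kd)^2 / (2*vars_kd) ] + log(pi_k), with broadcasting over the feature axis. A minimal numeric sketch of that computation, using toy parameter values that are illustrative only:

    import numpy as np

    X = np.array([[1.0, 2.0], [3.0, 0.5]])    # (m, d) samples
    mu = np.array([[0.0, 0.0], [2.0, 1.0]])   # (K, d) per-class means
    var = np.array([[1.0, 1.0], [0.5, 0.5]])  # (K, d) per-class variances
    pi = np.array([0.4, 0.6])                 # (K,) class priors

    ll = np.zeros((X.shape[0], len(pi)))
    for k in range(len(pi)):
        log_pdf = -0.5 * np.log(2 * np.pi * var[k]) - (X - mu[k]) ** 2 / (2 * var[k])
        ll[:, k] = log_pdf.sum(axis=1) + np.log(pi[k])  # sum features, add log prior

    print(ll.argmax(axis=1))  # class per sample, as in _predict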
paper1517/sonifications-paper1517
|
[
"f619ba306db8513e0a2f92970ddbec59e47cb27c",
"f619ba306db8513e0a2f92970ddbec59e47cb27c"
] |
[
"src/models/cnn12_decoder.py",
"src/utilities/feature_evolution_helper.py"
] |
[
"import torch\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom collections import OrderedDict\nfrom src.models.fp32_group_norm import Fp32GroupNorm\nfrom src.layers.scaled_std_conv import ScaledStdConv1d\nfrom src.layers.causal_utils import ScaledStdCausalConv1d\nfrom src.layers.scaled_std_conv import ScaledStdConvTranspose1d\nfrom src.layers.causal_utils import ScaledStdCausalConvTranspose1d\nfrom src.layers.gamma_act import GammaAct, act_with_gamma\nfrom src.layers import activation_helper\n\n\nclass DeconvolutionalDecoder(nn.Module):\n def __init__(self, encoder, causal=False):\n super(DeconvolutionalDecoder, self).__init__()\n activation = encoder.activation\n self.causal = causal\n encoder_state_dict = encoder.state_dict()\n\n self.kernel_sizes = encoder.kernel_sizes\n self.strides = encoder.strides\n self.filter_coeff = encoder.filter_coeff\n\n self.deconv_act11 = activation_helper.Activation(activation)\n self.deconv_conv11 = nn.ConvTranspose1d(int(self.filter_coeff * 2048), int(self.filter_coeff * 1024),\n kernel_size=(self.kernel_sizes[10],),\n stride=(1,), bias=False)\n self.deconv_conv11.weight = nn.Parameter(encoder_state_dict['features.conv11.weight'])\n\n self.deconv_act10 = activation_helper.Activation(activation)\n self.deconv_conv10 = nn.ConvTranspose1d(int(self.filter_coeff * 1024), int(self.filter_coeff * 1024),\n kernel_size=(self.kernel_sizes[9],),\n stride=(1,), bias=False)\n self.deconv_conv10.weight = nn.Parameter(encoder_state_dict['features.conv10.weight'])\n\n self.deconv_act9 = activation_helper.Activation(activation)\n self.deconv_conv9 = nn.ConvTranspose1d(int(self.filter_coeff * 1024), int(self.filter_coeff * 512),\n kernel_size=(self.kernel_sizes[8],),\n stride=(1,), bias=False)\n self.deconv_conv9.weight = nn.Parameter(encoder_state_dict['features.conv9.weight'])\n\n self.deconv_mp8 = nn.MaxUnpool1d(self.strides[7], self.strides[7])\n self.deconv_act8 = activation_helper.Activation(activation)\n self.deconv_conv8 = nn.ConvTranspose1d(int(self.filter_coeff * 512), int(self.filter_coeff * 512),\n kernel_size=(self.kernel_sizes[7],),\n stride=(1,), bias=False)\n self.deconv_conv8.weight = nn.Parameter(encoder_state_dict['features.conv8.weight'])\n\n self.deconv_mp7 = nn.MaxUnpool1d(self.strides[6], self.strides[6])\n self.deconv_act7 = activation_helper.Activation(activation)\n self.deconv_conv7 = nn.ConvTranspose1d(int(self.filter_coeff * 512), int(self.filter_coeff * 256),\n kernel_size=(self.kernel_sizes[6],),\n stride=(1,), bias=False)\n self.deconv_conv7.weight = nn.Parameter(encoder_state_dict['features.conv7.weight'])\n\n self.deconv_mp6 = nn.MaxUnpool1d(self.strides[5], self.strides[5])\n self.deconv_act6 = activation_helper.Activation(activation)\n self.deconv_conv6 = nn.ConvTranspose1d(int(self.filter_coeff * 256), int(self.filter_coeff * 256),\n kernel_size=(self.kernel_sizes[5],),\n stride=(1,), bias=False)\n self.deconv_conv6.weight = nn.Parameter(encoder_state_dict['features.conv6.weight'])\n\n self.deconv_mp5 = nn.MaxUnpool1d(self.strides[4], self.strides[4])\n self.deconv_act5 = activation_helper.Activation(activation)\n self.deconv_conv5 = nn.ConvTranspose1d(int(self.filter_coeff * 256), int(self.filter_coeff * 128),\n kernel_size=(self.kernel_sizes[4],),\n stride=(1,), bias=False)\n self.deconv_conv5.weight = nn.Parameter(encoder_state_dict['features.conv5.weight'])\n\n self.deconv_mp4 = nn.MaxUnpool1d(self.strides[3], self.strides[3])\n self.deconv_act4 = activation_helper.Activation(activation)\n self.deconv_conv4 = 
nn.ConvTranspose1d(int(self.filter_coeff * 128), int(self.filter_coeff * 128),\n kernel_size=(self.kernel_sizes[3],),\n stride=(1,), bias=False)\n self.deconv_conv4.weight = nn.Parameter(encoder_state_dict['features.conv4.weight'])\n\n self.deconv_mp3 = nn.MaxUnpool1d(self.strides[2], self.strides[2])\n self.deconv_act3 = activation_helper.Activation(activation)\n self.deconv_conv3 = nn.ConvTranspose1d(int(self.filter_coeff * 128), int(self.filter_coeff * 64),\n kernel_size=(self.kernel_sizes[2],),\n stride=(1,), bias=False)\n self.deconv_conv3.weight = nn.Parameter(encoder_state_dict['features.conv3.weight'])\n\n self.deconv_mp2 = nn.MaxUnpool1d(self.strides[1], self.strides[1])\n self.deconv_act2 = activation_helper.Activation(activation)\n self.deconv_conv2 = nn.ConvTranspose1d(int(self.filter_coeff * 64), int(self.filter_coeff * 64),\n kernel_size=(self.kernel_sizes[1],),\n stride=(1,), bias=False)\n self.deconv_conv2.weight = nn.Parameter(encoder_state_dict['features.conv2.weight'])\n\n self.deconv_mp1 = nn.MaxUnpool1d(self.strides[0], self.strides[0])\n self.deconv_act1 = activation_helper.Activation(activation)\n self.deconv_conv1 = nn.ConvTranspose1d(int(self.filter_coeff * 64), 1,\n kernel_size=(self.kernel_sizes[0],),\n stride=(1,), bias=False)\n self.deconv_conv1.weight = nn.Parameter(encoder_state_dict['features.conv1.weight'])\n\n def deconvolve(self, batch, input, encoder_features, switches, target_layer):\n if target_layer < 1 or target_layer > 11:\n raise ValueError(\"Incorrect target_layer value: {}\".format(target_layer))\n x = input\n if target_layer >= 11:\n # print(\"Applying deconv7\")\n x = self.deconv_act11(x)\n x = self.deconv_conv11(x, output_size=[encoder_features['conv10'].shape[-1]])\n # print(\"deconv11:\", x.shape)\n if target_layer >= 10:\n # print(\"Applying deconv7\")\n x = self.deconv_act10(x)\n x = self.deconv_conv10(x, output_size=[encoder_features['conv9'].shape[-1]])\n # print(\"deconv10:\", x.shape)\n if target_layer >= 9:\n # print(\"Applying deconv7\")\n x = self.deconv_act9(x)\n x = self.deconv_conv9(x, output_size=[encoder_features['mp8'].shape[-1]])\n # print(\"deconv9:\", x.shape)\n if target_layer >= 8:\n # print(\"Applying deconv7\")\n x = self.deconv_mp8(x, switches['mp8'], output_size=[encoder_features['conv8'].shape[-1]])\n x = self.deconv_act8(x)\n x = self.deconv_conv8(x, output_size=[encoder_features['mp7'].shape[-1]])\n # print(\"deconv8:\", x.shape)\n if target_layer >= 7:\n # print(\"Applying deconv7\")\n x = self.deconv_mp7(x, switches['mp7'], output_size=[encoder_features['conv7'].shape[-1]])\n x = self.deconv_act7(x)\n x = self.deconv_conv7(x, output_size=[encoder_features['mp6'].shape[-1]])\n # print(\"deconv7:\", x.shape)\n if target_layer >= 6:\n # print(\"Applying deconv6\")\n x = self.deconv_mp6(x, switches['mp6'], output_size=[encoder_features['conv6'].shape[-1]])\n x = self.deconv_act6(x)\n x = self.deconv_conv6(x, output_size=[encoder_features['mp5'].shape[-1]])\n if target_layer >= 5:\n # print(\"Applying deconv5\")\n x = self.deconv_mp5(x, switches['mp5'], output_size=[encoder_features['conv5'].shape[-1]])\n x = self.deconv_act5(x)\n x = self.deconv_conv5(x, output_size=[encoder_features['mp4'].shape[-1]])\n if target_layer >= 4:\n # print(\"Applying deconv4\")\n x = self.deconv_mp4(x, switches['mp4'], output_size=[encoder_features['conv4'].shape[-1]])\n x = self.deconv_act4(x)\n x = self.deconv_conv4(x, output_size=[encoder_features['mp3'].shape[-1]])\n if target_layer >= 3:\n # print(\"Applying deconv3\")\n x = 
self.deconv_mp3(x, switches['mp3'], output_size=[encoder_features['conv3'].shape[-1]])\n x = self.deconv_act3(x)\n x = self.deconv_conv3(x, output_size=[encoder_features['mp2'].shape[-1]])\n if target_layer >= 2:\n # print(\"Applying deconv2\")\n x = self.deconv_mp2(x, switches['mp2'], output_size=[encoder_features['conv2'].shape[-1]])\n x = self.deconv_act2(x)\n x = self.deconv_conv2(x, output_size=[encoder_features['mp1'].shape[-1]])\n if target_layer >= 1:\n # print(\"Applying deconv1\")\n x = self.deconv_mp1(x, switches['mp1'], output_size=[encoder_features['conv1'].shape[-1]])\n x = self.deconv_act1(x)\n x = self.deconv_conv1(x, output_size=[batch.shape[-1]])\n return x\n\n def visualize_layer(self, batch, encoder_features, switches, target_layer, top_n=9):\n # the order of things is as follows:\n # 1. Get target layers feature maps\n # 2. select top n most active activation maps. let these be idxs = {i_0, ---- i_n-1}\n # 3. for i in [i_0, - - - - -, i_n-1]:\n # -> set all activation maps apart from i to zero\n # -> deconv consecutively until you reach the input space\n # 4. return all these visualizations\n # pass\n # currently only support 1 inference at a time\n assert len(batch) == 1\n\n conv_maps = encoder_features[\"conv{}\".format(target_layer)]\n feature_maps = encoder_features[\"act{}\".format(target_layer)]\n if target_layer < 9:\n feature_maps = encoder_features['mp{}'.format(target_layer)]\n assert conv_maps.size(0) == feature_maps.size(0) == 1\n\n # idxs = torch.argsort(feature_maps.sum(dim=2), 1, descending=True)[0, :top_n].detach().cpu().tolist()\n # results = {}\n # for idx in range(feature_maps.size(1)):\n # inp = torch.zeros_like(feature_maps)\n # inp[:, idx, :] = feature_maps[:, idx, :]\n # vis = self.deconvolve(batch, inp, encoder_features, switches, target_layer)\n # results[idx] = vis.detach().cpu().numpy()\n # return results, idxs\n inp = feature_maps.clone()\n vis = self.deconvolve(batch, inp, encoder_features, switches, target_layer)\n vis = vis.detach().cpu().numpy()\n return vis\n\n def visualize_layer_v2(self, batch, encoder_features, switches, target_layer, top_n=9):\n # the order of things is as follows:\n # 1. Get target layers feature maps\n # 2. select top n most active activation maps. let these be idxs = {i_0, ---- i_n-1}\n # 3. for i in [i_0, - - - - -, i_n-1]:\n # -> set all activation maps apart from i to zero\n # -> deconv consecutively until you reach the input space\n # 4. 
return all these visualizations\n # pass\n # currently only support 1 inference at a time\n assert len(batch) == 1\n\n conv_maps = encoder_features[\"conv{}\".format(target_layer)]\n feature_maps = encoder_features[\"act{}\".format(target_layer)]\n if target_layer < 9:\n feature_maps = encoder_features['mp{}'.format(target_layer)]\n assert conv_maps.size(0) == feature_maps.size(0) == 1\n\n # idxs = torch.argsort(feature_maps.sum(dim=2), 1, descending=True)[0, :top_n].detach().cpu().tolist()\n # results = {}\n results = []\n for idx in range(feature_maps.size(1)):\n inp = torch.zeros_like(feature_maps)\n inp[:, idx, :] = feature_maps[:, idx, :]\n vis = self.deconvolve(batch, inp, encoder_features, switches, target_layer)\n vis = vis.detach().cpu().squeeze().numpy()\n results.append(vis)\n # result = np.asarray(results).mean(0)\n return results\n\n def visualize_specific_map(self, batch, encoder_features, switches, target_layer, target_filter, top_n=9):\n assert len(batch) == 1\n\n conv_maps = encoder_features[\"conv{}\".format(target_layer)]\n feature_maps = encoder_features[\"act{}\".format(target_layer)]\n if target_layer < 9:\n feature_maps = encoder_features['mp{}'.format(target_layer)]\n assert conv_maps.size(0) == feature_maps.size(0) == 1\n inp = torch.zeros_like(feature_maps)\n inp[:, target_filter, :] = feature_maps[:, target_filter, :]\n vis = self.deconvolve(batch, inp, encoder_features, switches, target_layer)\n vis = vis.detach().cpu().numpy()\n return vis\n\n def visualize_top_k_maps(self, batch, encoder_features, switches, target_layer, top_k):\n assert len(batch) == 1\n\n conv_maps = encoder_features[\"conv{}\".format(target_layer)]\n feature_maps = encoder_features[\"act{}\".format(target_layer)]\n if target_layer < 9:\n feature_maps = encoder_features['mp{}'.format(target_layer)]\n assert conv_maps.size(0) == feature_maps.size(0) == 1\n idxs = torch.argsort(feature_maps.abs().mean(dim=2), 1, descending=True)[0, :top_k].detach().cpu().tolist()\n results = []\n for idx in idxs:\n inp = torch.zeros_like(feature_maps)\n inp[:, idx, :] = feature_maps[:, idx, :]\n vis = self.deconvolve(batch, inp, encoder_features, switches, target_layer)\n vis = vis.detach().cpu().squeeze().numpy()\n results.append(vis)\n return results\n",
"import pickle\nimport matplotlib.pyplot as plt\nimport torch\nfrom src.utilities import interpretability_utils\nimport tqdm\nfrom tqdm import notebook\n\n\ndef get_selected_maps():\n with open(\"/media/user/nvme/contrastive_experiments/select_feature_maps.pkl\", \"rb\") as fd:\n selected_maps = pickle.load(fd)\n return selected_maps\n\n\ndef get_selected_inputs():\n with open(\"/media/user/nvme/contrastive_experiments/selected_inputs_evolution.pkl\", \"rb\") as fd:\n selected_inputs = pickle.load(fd)\n return selected_inputs\n\n\ndef get_max_activating_inputs_for_best_ckpt(layer_index, dataset,\n inv_lbl_map, net,\n selected_maps,\n top_per_map=9, to_exclude=[]):\n assert layer_index >= 1 and layer_index <= 11\n features_of_interest = {}\n random_maps = None\n used_data_points = []\n cnt = 0\n skipped = 0\n # for batch in tqdm.notebook.tqdm_notebook(loader, position=1):\n for ix in tqdm.notebook.tqdm_notebook(range(len(dataset)), position=1):\n x, y = dataset[ix]\n # print(x.shape, y.shape)\n # x, _, y = batch\n x = x.unsqueeze(0)\n min_ = x.min()\n max_ = x.max()\n if min_ < -1 and max_ > 1:\n print(\"IN ANALYZIE RANDOM MAX, INPUT MIN, MAX:\", x.min(), x.max())\n idxs = torch.where(y == 1)[0].tolist()\n skip_flag = False\n for idx in idxs:\n if idx in to_exclude:\n skip_flag = True\n break\n if skip_flag:\n skipped += 1\n continue\n lbls = \";\".join([inv_lbl_map[lbl_idx] for lbl_idx in idxs])\n # data.append(x)\n # gts.append(lbls)\n x = x.cuda()\n output_features, switch_indices = interpretability_utils.infer_model(net, x)\n act_feats = output_features['act{}'.format(layer_index)]\n\n if random_maps is None:\n random_maps = selected_maps['act{}'.format(layer_index)]\n for jx in random_maps:\n features_of_interest[jx] = []\n for m in random_maps:\n features_of_interest[m].append(act_feats[0, m, :].detach().cpu().mean())\n cnt += 1\n used_data_points.append(ix)\n print(\"Skipped:\", skipped)\n indices = {}\n for k, values in features_of_interest.items():\n mean_activations = torch.tensor(values)\n idxs = torch.argsort(mean_activations, descending=True)[:top_per_map]\n indices[k] = idxs.tolist()\n return indices, used_data_points\n\n\ndef analyze_random_maps(layer_index, dataset, inv_lbl_map,\n net, deconv,\n max_activation_inputs, used_data_points):\n assert layer_index >= 1 and layer_index <= 11\n outputs = {}\n for k in max_activation_inputs.keys():\n outputs[k] = []\n\n for k, idxs in max_activation_inputs.items():\n for idx in idxs:\n # print(idx)\n # inp = data[idx].cuda()\n inp, y = dataset[used_data_points[idx]]\n label_indicators = torch.where(y == 1)[0].tolist()\n gt = \";\".join([inv_lbl_map[lbl_idx] for lbl_idx in label_indicators])\n inp = inp.unsqueeze(0).cuda()\n with torch.no_grad():\n pred, output_features, switch_indices = net(inp, True)\n vis = deconv.visualize_specific_map(inp, output_features, switch_indices, layer_index, k)\n outputs[k].append({\n # \"data\": inp.detach().cpu(),\n \"data_idx\": used_data_points[idx],\n \"vis\": interpretability_utils.process_vis(vis.squeeze(), inp.squeeze().cpu().numpy()),\n \"gt\": gt\n })\n return outputs\n\n\ndef get_ckpt_indices(is_contrastive=False):\n if is_contrastive:\n ckpt_idxs = [100, 80, 60, 40, 20, 1]\n else:\n ckpt_idxs = [1] + [i * 10 for i in range(1, 6)]\n ckpt_idxs = ckpt_idxs[::-1]\n return ckpt_idxs\n\n\ndef process_features_over_training(exp_dir, ckpt_idxs, layer_index, selected_inputs,\n val_set, inv_lbl_map, is_contrastive=False):\n # load model\n all_results = {}\n indices, used_data_points = 
selected_inputs[layer_index]['indices'], selected_inputs[layer_index]['used_data_points']\n print(indices)\n for ckpt_idx in notebook.tqdm(ckpt_idxs, position=0):\n model, net, deconv, hparams = interpretability_utils.model_helper(exp_dir, is_contrastive, epoch_index=ckpt_idx)\n results_ckpt = analyze_random_maps(layer_index, val_set, inv_lbl_map, net, deconv, indices, used_data_points)\n all_results[ckpt_idx] = results_ckpt\n return all_results\n\n\ndef spec_helper(spec_list, filter_idx, layer_idx, ckpt_idxs, save_path=None):\n plt.close()\n plt.clf()\n figs, axs = plt.subplots(ncols=6, figsize=(30, 5))\n figs.suptitle('Feature Evolution: Layer {:02d} | Filter: {:02d}'.format(layer_idx, filter_idx))\n for ix in range(len(spec_list)):\n axs[5 - ix].imshow(spec_list[ix])\n axs[5 - ix].set_title(\"Epoch: {:02d}\".format(ckpt_idxs[ix]))\n axs[5 - ix].axis(\"off\")\n plt.tight_layout()\n if save_path:\n plt.savefig(save_path)\n else:\n plt.show()\n\n\ndef plot_evo_spectrograms(layer_results, layer_idx, ckpt_idxs):\n specs = {}\n for epoch, data in layer_results.items():\n for key, value in data.items():\n # print(key, value, len(value))\n vis = value[0]['vis']\n spec = interpretability_utils.get_spec(vis)\n try:\n specs[key].append(spec)\n except KeyError as ex:\n specs[key] = [spec]\n\n for k, v in specs.items():\n spec_helper(v, k, layer_idx, ckpt_idxs)\n\n\ndef plot_evo_spectrograms_noplot(layer_results, layer_idx, ckpt_idxs):\n specs = {}\n for epoch, data in layer_results.items():\n for key, value in data.items():\n # print(key, value, len(value))\n vis = value[0]['vis']\n spec = interpretability_utils.get_spec(vis)\n # print(spec.shape)\n # spec = torch.from_numpy(spec).unsqueeze\n torch_spec = torch.cat([torch.from_numpy(spec).unsqueeze(0)]*3)\n try:\n specs[key].append(torch_spec)\n except KeyError as ex:\n specs[key] = [torch_spec]\n return specs\n\n\ndef tile_spectrograms(specs):\n all_specs = []\n for key, val in specs.items():\n print(key)\n key_specs = []\n for item in val[::-1]:\n all_specs.append(item)\n all_specs.extend(key_specs)\n return all_specs\n"
] |
[
[
"torch.nn.Parameter",
"torch.zeros_like",
"torch.nn.MaxUnpool1d"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"torch.tensor",
"torch.from_numpy",
"matplotlib.pyplot.clf",
"torch.no_grad",
"matplotlib.pyplot.close",
"torch.where",
"torch.argsort",
"matplotlib.pyplot.show"
]
] |
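The decoder above can only invert pooling because the encoder records the argmax positions (the "switches"), and torch.nn.MaxUnpool1d scatters activations back to exactly those positions, zeroing everything else. A minimal round-trip sketch of that mechanism, independent of this repo's encoder:

    import torch
    from torch import nn

    pool = nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)
    unpool = nn.MaxUnpool1d(kernel_size=2, stride=2)

    x = torch.tensor([[[1.0, 3.0, 2.0, 0.0, 5.0, 4.0]]])      # (N, C, L)
    pooled, switches = pool(x)                                 # maxima + their indices
    restored = unpool(pooled, switches, output_size=x.size())  # zeros off the switches

    print(pooled)    # tensor([[[3., 2., 5.]]])
    print(restored)  # tensor([[[0., 3., 2., 0., 5., 0.]]])

Passing output_size, as the decoder does at every stage, resolves the ambiguity when several input lengths pool down to the same output length.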
nrandell/onnxruntime
|
[
"4d32050f784fc73d065bb030e937bd45ac86ff32"
] |
[
"onnxruntime/test/python/onnx_backend_test_series.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport argparse\nimport sys\nimport os\nimport platform\nimport unittest\nimport onnx\nimport onnx.backend.test\n\nimport numpy as np\nimport onnxruntime.backend as c2\n\npytest_plugins = 'onnx.backend.test.report',\n\nclass OrtBackendTest(onnx.backend.test.BackendTest):\n\n def __init__(self, backend, parent_module=None):\n super(OrtBackendTest, self).__init__(backend, parent_module)\n\n @classmethod\n def assert_similar_outputs(cls, ref_outputs, outputs, rtol, atol):\n np.testing.assert_equal(len(ref_outputs), len(outputs))\n for i in range(len(outputs)):\n np.testing.assert_equal(ref_outputs[i].dtype, outputs[i].dtype)\n if ref_outputs[i].dtype == np.object:\n np.testing.assert_array_equal(ref_outputs[i], outputs[i])\n else:\n np.testing.assert_allclose(\n ref_outputs[i],\n outputs[i],\n rtol=1e-3,\n atol=1e-5)\n\n\n# ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported\ndef tests_with_pre_opset7_dependencies_filters():\n filters = ['^test_AvgPool1d_cpu',\n '^test_AvgPool1d_stride_cpu',\n '^test_AvgPool2d_cpu',\n '^test_AvgPool2d_stride_cpu',\n '^test_AvgPool3d_cpu',\n '^test_AvgPool3d_stride1_pad0_gpu_input_cpu',\n '^test_AvgPool3d_stride_cpu',\n '^test_BatchNorm1d_3d_input_eval_cpu',\n '^test_BatchNorm2d_eval_cpu',\n '^test_BatchNorm2d_momentum_eval_cpu',\n '^test_BatchNorm3d_eval_cpu',\n '^test_BatchNorm3d_momentum_eval_cpu',\n '^test_GLU_cpu',\n '^test_GLU_dim_cpu',\n '^test_Linear_cpu',\n '^test_PReLU_1d_cpu',\n '^test_PReLU_1d_multiparam_cpu',\n '^test_PReLU_2d_cpu',\n '^test_PReLU_2d_multiparam_cpu',\n '^test_PReLU_3d_cpu',\n '^test_PReLU_3d_multiparam_cpu',\n '^test_PoissonNLLLLoss_no_reduce_cpu',\n '^test_Softsign_cpu',\n '^test_operator_add_broadcast_cpu',\n '^test_operator_add_size1_broadcast_cpu',\n '^test_operator_add_size1_right_broadcast_cpu',\n '^test_operator_add_size1_singleton_broadcast_cpu',\n '^test_operator_addconstant_cpu',\n '^test_operator_addmm_cpu',\n '^test_operator_basic_cpu',\n '^test_operator_mm_cpu',\n '^test_operator_non_float_params_cpu',\n '^test_operator_params_cpu',\n '^test_operator_pow_cpu']\n\n return filters\n\n\ndef unsupported_usages_filters():\n filters = ['^test_convtranspose_1d_cpu', # ConvTransponse supports 4-D only\n '^test_convtranspose_3d_cpu']\n\n return filters\n\n\ndef other_tests_failing_permanently_filters():\n # Numpy float to string has unexpected rounding for some results given numpy default precision is meant to be 8.\n # e.g. 0.296140194 -> '0.2961402' not '0.29614019'. ORT produces the latter with precision set to 8, which\n # doesn't match the expected output that was generated with numpy.\n filters = ['^test_cast_FLOAT_to_STRING_cpu']\n\n return filters\n\n\n\ndef test_with_types_disabled_due_to_binary_size_concerns_filters():\n filters = ['^test_bitshift_right_uint16_cpu',\n '^test_bitshift_left_uint16_cpu']\n\n return filters\n\n\ndef create_backend_test(testname=None):\n backend_test = OrtBackendTest(c2, __name__)\n\n # Type not supported\n backend_test.exclude(r'(FLOAT16)')\n\n if testname:\n backend_test.include(testname + '.*')\n else:\n # Tests that are failing temporarily and should be fixed\n current_failing_tests = [#'^test_cast_STRING_to_FLOAT_cpu', # old test data that is bad on Linux CI builds\n '^test_unique_not_sorted_without_axis_cpu', # bad expected data. 
enable after https://github.com/onnx/onnx/pull/2381 is picked up\n '^test_mod_float_mixed_sign_example_cpu', #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types\n '^test_resize_downsample_scales_cubic_align_corners_cpu', # results mismatch with onnx tests\n '^test_resize_downsample_scales_linear_align_corners_cpu', # results mismatch with onnx tests\n '^test_resize_tf_crop_and_resize_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu', # bad expected data, needs test fix\n '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu', # bad expected data, needs test fix\n '^test_maxunpool_export_with_output_shape_cpu', # Invalid output in ONNX test. See https://github.com/onnx/onnx/issues/2398'\n ]\n\n # Example of how to disable tests for a specific provider.\n # if c2.supports_device('NGRAPH'):\n # current_failing_tests.append('^test_operator_repeat_dim_overflow_cpu')\n if c2.supports_device('NGRAPH'):\n current_failing_tests += ['^test_clip.*',\n '^test_qlinearconv_cpu',\n '^test_depthtospace_crd.*',\n '^test_argmax_negative_axis.*',\n '^test_argmin_negative_axis.*',\n '^test_hardmax_negative_axis.*',\n '^test_gemm_default_no_bias_cpu',\n '^test_flatten_negative_axis.*',\n '^test_reduce_[a-z1-9_]*_negative_axes_.*',\n 'test_squeeze_negative_axes_cpu',\n 'test_unsqueeze_negative_axes_cpu',\n 'test_constant_pad_cpu',\n 'test_edge_pad_cpu',\n 'test_reflect_pad_cpu']\n\n if c2.supports_device('MKL-DNN'):\n current_failing_tests += ['^test_range_float_type_positive_delta_expanded_cpu',\n '^test_range_int32_type_negative_delta_expanded_cpu']\n\n if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device('OPENVINO_GPU_FP16'):\n current_failing_tests.append('^test_div_cpu*')\n\n if c2.supports_device('OPENVINO_CPU_FP32'):\n current_failing_tests += ['^test_scan9_sum_cpu',#sum_out output node not defined, temporarily disabling test\n '^test_scan_sum_cpu'] #sum_out output node not defined, temporarily disabling test\n\n filters = current_failing_tests + \\\n tests_with_pre_opset7_dependencies_filters() + \\\n unsupported_usages_filters() + \\\n other_tests_failing_permanently_filters() + \\\n test_with_types_disabled_due_to_binary_size_concerns_filters()\n\n backend_test.exclude('(' + '|'.join(filters) + ')')\n print('excluded tests:', filters)\n\n # import all test cases at global scope to make\n # them visible to python.unittest.\n globals().update(backend_test.enable_report().test_cases)\n\n return backend_test\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(os.path.basename(__file__),\n description='Run the ONNX backend tests using ONNXRuntime.')\n\n # Add an argument to match a single test name, by adding the name to the 'include' filter.\n # Using -k with python unittest (https://docs.python.org/3/library/unittest.html#command-line-options)\n # doesn't work as it filters on the test method name (Runner._add_model_test) rather than inidividual test case names.\n parser.add_argument('-t', '--test-name', dest='testname', type=str,\n help=\"Only run tests that match this value. Matching is regex based, and '.*' is automatically appended\")\n\n # parse just our args. 
python unittest has its own args and arg parsing, and that runs inside unittest.main()\n args, left = parser.parse_known_args()\n sys.argv = sys.argv[:1] + left\n\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n backend_test = create_backend_test(args.testname)\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose"
]
] |
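The exclusion machinery above reduces to joining anchored per-test regexes into a single alternation and skipping any test whose name matches. A standalone sketch of those mechanics with plain `re`, without onnx's BackendTest:

    import re

    filters = ['^test_convtranspose_1d_cpu', '^test_cast_FLOAT_to_STRING_cpu']
    pattern = re.compile('(' + '|'.join(filters) + ')')  # same join as exclude()

    names = ['test_convtranspose_1d_cpu', 'test_add_cpu']
    kept = [n for n in names if not pattern.match(n)]
    print(kept)  # ['test_add_cpu']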
tcsvn/pyadlml
|
[
"9b87d223ba0ef9814ba830263dd35fc6432fae87"
] |
[
"pyadlml/dataset/plot/image.py"
] |
[
"from pyadlml.dataset.plot.util import heatmap, annotate_heatmap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n\ndef forceAspect(ax,aspect):\n im = ax.get_images()\n extent = im[0].get_extent()\n ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)\n \ndef mean_image(images, devices, figsize=(10,10)):\n \"\"\" plots the mean oimage of all images\n Parameters\n ----------\n images: 3d np array (n x win_size x dev_count)\n a 3d batch of images with window size and the feature/devices as last axis\n devices: list \n a list of all device labels\n \n \"\"\"\n title='Mean image'\n \n mean_image = images.mean(axis=0).T\n \n fig, ax = plt.subplots(figsize=figsize)\n h, w = mean_image.shape\n \n im = ax.imshow(mean_image, extent=[0,w,0,h], vmin=0, vmax=1)\n \n \n divider = make_axes_locatable(ax) \n cax = divider.append_axes(\"right\", size='5%', pad=0.1)\n cbar = cax.figure.colorbar(im, cax)\n #plt.colorbar(im, cax=cax)\n \n \n ax.set_xticks(np.arange(w) + 0.5)\n ax.set_yticks(np.arange(h) + 0.5)\n \n ax.set_xticklabels(np.arange(w)+1)\n ax.set_yticklabels(devices[::-1]) # reverse because of transpose of values\n \n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n \n #forceAspect(ax, aspect=1.5)\n ax.set_title(title)\n fig.tight_layout()\n plt.show()\n\n\n\ndef mean_image_per_activity(X, y, devices, figsize=(14,18)):\n \"\"\" plots the mean oimage of all images\n Parameters\n ----------\n X: 3d np array (n x win_size x dev_count)\n a 3d batch of images with window size and the feature/devices as last axis\n y: 1d array (n,) of strings\n activity labels \n devices: list \n a list of all device labels\n \"\"\"\n title='Mean image per Activity'\n num_plts_x = 3\n \n activities = np.unique(y)\n \n mi_lst = []\n for act in activities:\n # get the index where the activity matches and compute mean\n idxs = np.where(y == act)[0] \n mi_lst.append(X[idxs].mean(axis=0).T)\n \n len_acts = len(activities)\n num_plts_y = int(np.ceil(len_acts/num_plts_x))\n \n \n # plotting\n fig, axs = plt.subplots(num_plts_y, num_plts_x, \n figsize=figsize)\n #plt.title('asdf', y=1.08)\n \n h, w = mi_lst[0].shape\n k = 0\n pcm_list = []\n for i in range(num_plts_y):\n for j in range(num_plts_x):\n if k >= len(activities):\n axs[i,j].axis('off')\n continue\n \n ax = axs[i,j]\n im = ax.imshow(mi_lst[k], vmin=0, vmax=1)#, extent=[0,w,0,h],)\n #pcm = ax.pcolormesh(mi_lst[k], cmap='viridis')\n #pcm_list.append(pcm)\n \n #if k+1 == len(activities):\n ax.set_yticks(np.arange(h))\n if j == 0: \n ax.set_yticklabels(devices)\n else:\n ax.set_yticklabels([])\n ax.set_xticks(np.arange(w))\n ax.set_xticklabels(np.arange(w)+1)\n \n ax.set_title(activities[k])\n k += 1\n \n # make cosmetic changes\n plt.subplots_adjust(wspace=0.1, hspace=-.6)\n plt.suptitle(title,fontsize=20, y=0.8, va='top')\n plt.show()"
] |
[
[
"numpy.unique",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.ceil",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.suptitle",
"numpy.where",
"matplotlib.pyplot.show"
]
] |
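mean_image_per_activity above groups windows by label with np.where and averages over the batch axis before transposing for display. A minimal sketch of that grouping step on synthetic data; shapes follow the plotting code, where X is (n, win_size, dev_count):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.random((6, 4, 3))  # 6 windows of length 4 over 3 devices
    y = np.array(['cook', 'sleep', 'cook', 'sleep', 'cook', 'sleep'])

    mean_images = {}
    for act in np.unique(y):
        idxs = np.where(y == act)[0]               # windows labelled `act`
        mean_images[act] = X[idxs].mean(axis=0).T  # (dev_count, win_size)

    print(mean_images['cook'].shape)  # (3, 4)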
AlanLoh/nenupy
|
[
"5b9e6ae7cc28d0dc4ed450a408b124f71a1c9cc8"
] |
[
"nenupy/observation/obs_config.py"
] |
[
"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n *************************\n Observation configuration\n *************************\n\n `NenuFAR <https://nenufar.obs-nancay.fr/en/astronomer/>`_\n is a versatile low-frequency radio telescope. Several\n observing modes are available, as represented by the \n diversity of its `receivers <https://nenufar.obs-nancay.fr/en/astronomer/#receivers>`_.\n Observations are configured thanks to text files\n called *parsets* in which the details of the instrumental set-up,\n the observation mode(s),\n the data sampling parameters,\n the operations applied on the data, etc. are listed.\n\n The :mod:`~nenupy.observation.obs_config` module aims at\n handling these different observing configurations as well\n as providing estimation on the data volume output by\n *NenuFAR* in one or several given set-up(s).\n\n NenuFAR Receiver setup\n ----------------------\n \n Manual setting\n ^^^^^^^^^^^^^^\n\n The various `receivers <https://nenufar.obs-nancay.fr/en/astronomer/#receivers>`_\n configurations may be set 'manually'. In such case, the user\n needs to fill in the different parameters relevant to\n characterize the observation with the desired receiver.\n\n For instance, say one is interested in performing an observation\n and wants to estimate the volume of the most basic *NenuFAR* data\n output: the *Beamlet Statistics* (or *BST*) FITS files. Instanciating\n an 'empty' :class:`~nenupy.observation.obs_config.BSTConfig` object\n and printing it gives a quicklook of all the properties one may want\n to modify, as well as their current values that are set by default:\n\n .. code-block:: python\n\n >>> from nenupy.observation import BSTConfig\n >>> bstconf = BSTConfig()\n >>> print(bstconf)\n Backend configuration of type 'BSTConfig'\n Properties: 'nSubBands=768', 'nPolars=2', 'durationSec=0'\n\n .. note::\n At any time, the user may query the receiver parameters\n by printing the corresponding instance.\n\n .. seealso::\n The other receivers dedicated classes are listed in :ref:`obs_config_class_summary`.\n\n Attribute values can be directly set to the user preferences\n to update the status of the current :class:`~nenupy.observation.obs_config.BSTConfig`\n instance:\n\n .. code-block:: python\n\n >>> bstconf.durationSec = 1800\n\n Alternatively, the object can be initialized with specific\n property values given as keyword arguments:\n\n .. code-block:: python\n\n >>> bstconf = BSTConfig(durationSec=1800)\n\n Finally, to compute an estimation of the data volume \n (returned as a :class:`~astropy.units.Quantity` object):\n\n .. code-block:: python\n\n >>> bstconf.volume\n 10.546875 Mibyte\n\n >>> vol = bstconf.volume\n >>> vol.to('Gibyte')\n 0.010299683Gibyte\n\n .. warning::\n The *beamformer* receivers allow for multi-beams observations. These\n properties cannot be set manually in a straightforward way.\n Instead, it is recommended to either treat each individual beam\n separately or to instanciate the relevant objects with a parset file.\n\n Setting from Parset file\n ^^^^^^^^^^^^^^^^^^^^^^^^\n\n The most convenient way to set a given receiver's properties\n associated to as specific observation is to initialize the\n corresponding object instance from the observation *parset*:\n\n .. code-block:: python\n\n >>> from nenupy.observation import BSTConfig\n >>> bstconf = BSTConfig.fromParset('/path/to/observation.parset')\n\n Calling the class method ``fromParset`` automatically loads the given file\n as a :class:`~nenupy.observation.parset.Parset`. 
The contained instrumental\n information is parsed and the properties relevant to the receiver class\n are used to initialize the object instance.\n\n If an observation is configured to use the multi-beam capability of\n *NenuFAR*, the receiver properties will take that into account \n and the data volume estimation will then be computed accordingly.\n\n .. warning::\n At the current stage of development, the *NenuFAR* configuration files\n called *parset user*\n (ending with ``'.parset_user'``) are not supported.\n\n Observation setup\n -----------------\n\n Rather than configuring each receiver individually, one might be\n interested in setting all of the *NenuFAR* receivers at once, from one\n or several *parset* file(s).\n This is achieved using the :class:`~nenupy.observation.obs_config.ObsConfig`\n class which stores information on all available receivers and updates their\n configuration parameters according to what is described in the *parset* file(s).\n\n Single observation\n ^^^^^^^^^^^^^^^^^^\n\n In the case of a single observation, described by a unique *parset* file\n (namely ``'/path/to/observation.parset'`` in the following example),\n an instance of :class:`~nenupy.observation.obs_config.ObsConfig` is\n simply created using the class method\n :meth:`~nenupy.observation.obs_config.ObsConfig.fromParset`:\n\n .. code-block:: python\n\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParset('/path/to/observation.parset')\n\n The variable called ``obsconf`` of type :class:`~nenupy.observation.obs_config.ObsConfig`\n now contains attributes named after\n the various *NenuFAR* receivers. Every one of these attributes is a\n list (of only one element in this case) of corresponding configuration\n class instances:\n\n .. code-block:: python\n\n >>> type(obsconf.tf[0])\n nenupy.observation.obs_config.TFConfig\n\n >>> type(obsconf.nickel[0])\n nenupy.observation.obs_config.NICKELConfig\n\n Querying :attr:`~nenupy.observation.obs_config.ObsConfig.volume` returns\n a dictionary composed of the *NenuFAR* receivers as keys and their\n corresponding raw data volume estimations for the current observation:\n\n .. code-block:: python\n\n >>> obsconf.volume\n {'nickel': <Quantity 0. Gibyte>,\n 'raw': <Quantity 0. Gibyte>,\n 'tf': <Quantity 56.57784641 Gibyte>,\n 'bst': <Quantity 9.4921875 Mibyte>,\n 'pulsar_fold': <Quantity 0. Gibyte>,\n 'pulsar_waveolaf': <Quantity 0. Gibyte>,\n 'pulsar_single': <Quantity 0. Gibyte>}\n\n\n List of observations\n ^^^^^^^^^^^^^^^^^^^^\n\n Conveniently, it is also possible to initialize an\n :class:`~nenupy.observation.obs_config.ObsConfig` object\n from a list of several *parset* files.\n In order to do that, one simply needs to call the\n :meth:`~nenupy.observation.obs_config.ObsConfig.fromParsetList`\n class method:\n\n .. code-block:: python\n\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParsetList(\n [\n '/path/to/observation_1.parset',\n '/path/to/observation_2.parset',\n '/path/to/observation_3.parset'\n ]\n )\n \n Querying the :attr:`~nenupy.observation.obs_config.ObsConfig.volume`\n attribute returns a dictionary with the summed estimated raw\n data volumes for all the *NenuFAR* receivers over all the\n observations described by the *parset* files:\n\n .. 
code-block:: python\n\n >>> obsconf.volume\n {'nickel': <Quantity 726.41601562 Gibyte>,\n 'raw': <Quantity 282.88923204 Gibyte>,\n 'tf': <Quantity 1093.13987195 Gibyte>,\n 'bst': <Quantity 264.4921875 Mibyte>,\n 'pulsar_fold': <Quantity 11.50373708 Gibyte>,\n 'pulsar_waveolaf': <Quantity 558.79404545 Gibyte>,\n 'pulsar_single': <Quantity 61.29266694 Gibyte>}\n\n To get the total estimated raw data volume for a specific\n receiver, and convert its unit to *Tebibytes* for instance, one can\n do:\n\n .. code-block:: python\n\n >>> obsconf.volume['tf'].to('Tibyte')\n 1.0675194 Tibyte\n\n Assuming ``dec2020_parset_list`` is a list of parsets associated with\n the past observations done in December 2020, plotting the cumulative\n estimated raw data volume is also made easy by the method\n :meth:`~nenupy.observation.obs_config.ObsConfig.plotCumulativeVolume`:\n\n .. code-block:: python\n\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParsetList(dec2020_parset_list)\n >>> obsconf.plotCumulativeVolume(\n title='NenuFAR observations, December 2020',\n scale='log'\n )\n\n .. image:: ./_images/volume_december20_log.png\n :width: 800\n\n .. _obs_config_class_summary:\n\n Classes summary\n ---------------\n\n .. autosummary::\n :nosignatures:\n\n ~nenupy.observation.obs_config.ObsConfig\n ~nenupy.observation.obs_config.BSTConfig\n ~nenupy.observation.obs_config.NICKELConfig\n ~nenupy.observation.obs_config.TFConfig\n ~nenupy.observation.obs_config.RAWConfig\n ~nenupy.observation.obs_config.PulsarFoldConfig\n ~nenupy.observation.obs_config.PulsarWaveConfig\n ~nenupy.observation.obs_config.PulsarSingleConfig\n\n\"\"\"\n\n\n__author__ = 'Alan Loh, Baptiste Cecconi'\n__copyright__ = 'Copyright 2020, nenupy'\n__credits__ = ['Alan Loh']\n__maintainer__ = 'Alan'\n__email__ = '[email protected]'\n__status__ = 'Production'\n__all__ = [\n 'BSTConfig',\n 'NICKELConfig',\n 'TFConfig',\n 'RAWConfig',\n 'PulsarFoldConfig',\n 'PulsarWaveConfig',\n 'PulsarSingleConfig',\n 'ObsConfig'\n]\n\n\nimport astropy.units as u\nfrom astropy.time import Time, TimeDelta\nimport numpy as np\n\nfrom nenupy.observation import Parset\nfrom nenupy.miscellaneous import accepts\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nbackendProperties = {\n 'nickel': {\n 'nSubBands': {\n 'min': 1,\n 'max': 384,\n 'default': 384,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n 'nChannels': {\n 'min': 1,\n 'max': 64,\n 'default': 64,\n 'type': '`int`',\n 'desc': 'Number of channels'\n },\n 'nPolars': {\n 'min': 1,\n 'max': 4,\n 'default': 4,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n },\n 'nMAs': {\n 'min': 2,\n 'max': 96 + 6,\n 'default': 96,\n 'type': '`int`',\n 'desc': 'Number of Mini-Arrays'\n },\n 'timeRes': {\n 'min': 0.,\n 'max': 10.,\n 'default': 1.,\n 'type': '`float` or :class:`~astropy.time.TimeDelta`',\n 'desc': 'Time resolution in seconds'\n }\n },\n 'raw': {\n 'nPolars': {\n 'min': 1,\n 'max': 4,\n 'default': 4,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n },\n 'nSubBands': {\n 'min': 1,\n 'max': 192,\n 'default': 192,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n 'nBits': {\n 'min': 8,\n 'max': 16,\n 'default': 8,\n 'type': '`int`',\n 'desc': 'Number of bits on which are recorded data elements'\n }\n },\n 'tf': {\n 'nPolars': {\n 'min': 1,\n 'max': 4,\n 'default': 4,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n },\n 'timeRes': {\n 'min': (0.30*u.ms).to(u.s).value,\n 'max': (83.89*u.ms).to(u.s).value,\n 'default': (5.00*u.ms).to(u.s).value,\n 
'type': '`float` or :class:`~astropy.time.TimeDelta`',\n 'desc': 'Time resolution in seconds'\n },\n 'freqRes': {\n 'min': (0.10*u.kHz).to(u.Hz).value,\n 'max': (12.21*u.kHz).to(u.Hz).value,\n 'default': (6.10*u.kHz).to(u.Hz).value,\n 'type': '`float` or :class:`~astropy.units.Quantity`',\n 'desc': 'Frequency resolution in Hz'\n },\n 'nSubBands': {\n 'min': 1,\n 'max': 768,\n 'default': 768,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n },\n 'bst': {\n 'nSubBands': {\n 'min': 1,\n 'max': 768,\n 'default': 768,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n 'nPolars': {\n 'min': 1,\n 'max': 2,\n 'default': 2,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n }\n },\n # 'sst': {},\n 'pulsar_fold': {\n 'nSubBands': {\n 'min': 1,\n 'max': 192,\n 'default': 192,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n 'nPolars': {\n 'min': 1,\n 'max': 4,\n 'default': 4,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n },\n 'tFold': {\n 'min': 5.36870912,\n 'max': 21.47483648,\n 'default': 10.73741824,\n 'type': '`float` or :class:`~astropy.time.TimeDelta`',\n 'desc': 'Pulsar time fold in seconds'\n },\n 'nBins': {\n 'min': 16,\n 'max': 8096,\n 'default': 2048,\n 'type': '`int`',\n 'desc': 'Number of bins'\n }\n },\n 'pulsar_waveolaf': {\n 'nSubBands': {\n 'min': 1,\n 'max': 192,\n 'default': 192,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n }\n },\n 'pulsar_single': {\n 'nSubBands': {\n 'min': 1,\n 'max': 192,\n 'default': 192,\n 'type': '`int`',\n 'desc': 'Number of sub-bands'\n },\n 'nPolars': {\n 'min': 1,\n 'max': 4,\n 'default': 4,\n 'type': '`int`',\n 'desc': 'Number of polarizations'\n },\n 'dsTime': {\n 'min': 1,\n 'max': 4096,\n 'default': 128,\n 'type': '`int`',\n 'desc': 'Downsampling'\n },\n 'nBits': {\n 'min': 8,\n 'max': 64,\n 'default': 32,\n 'type': '`int`',\n 'desc': 'Number of bits on which are recorded data elements'\n }\n }\n}\n\n\ncomplex64 = np.complex64().itemsize * u.byte\nfloat32 = np.float32().itemsize * u.byte\n\n\ndef doc(docstring, backend):\n prop = backendProperties[backend]\n paramDoc = ''\n for key in prop.keys():\n param = prop[key]\n paramDoc += f\"\"\"\n :param {key}:\n {param['desc']} (min: ``{param['min']}``,\n max: ``{param['max']}``, default: ``{param['default']}``).\n :type {key}: {param['type']}\n \"\"\"\n paramDoc += \"\"\"\n :param durationSec:\n Observation duration in seconds (default: ``0``).\n :type durationSec: `int` or :class:`~astropy.time.TimeDelta`\n\n .. versionadded:: 1.2.0\n \"\"\"\n def document(func):\n func.__doc__ = docstring + '\\n' + paramDoc\n return func\n return document\n\n\n# ============================================================= #\n# ---------------------- _BackendConfig ----------------------- #\n# ============================================================= #\nclass _BackendConfig(object):\n \"\"\"\n .. 
versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, backend, **kwargs):\n self.startTime = kwargs.get(\n 'startTime',\n Time.now()\n )\n self.durationSec = kwargs.get(\n 'durationSec',\n 0\n )\n self._backend = backend\n\n # Catch the irrelevant kwargs\n for attr in kwargs.keys():\n if attr.startswith('_'):\n raise AttributeError(\n \"Attribute '{}' starting with '_' cannot be set.\".format(\n attr\n )\n )\n if attr not in dir(self):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n self.__class__.__name__,\n attr\n )\n )\n\n # Fill the relevant kwargs\n for attr in backendProperties[self._backend].keys():\n setattr(\n self,\n attr,\n kwargs.get(\n attr,\n backendProperties[self._backend][attr]['default']\n )\n )\n\n\n def __str__(self):\n className = self.__class__.__name__\n title = \"Backend configuration of type '{}'\\n\".format(className)\n attributes = backendProperties[self._backend].keys()\n properties = '\\tProperties: '\n for att in attributes:\n properties += \"'{}={}', \".format(att, getattr(self, att))\n properties += \"'{}={}', \".format('durationSec', getattr(self, 'durationSec'))\n properties = properties[:-2] # remove the last coma\n return title + properties\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def nSubBands(self):\n \"\"\"\n \"\"\"\n return self._nSubBands\n @nSubBands.setter\n @accepts(object, int)\n def nSubBands(self, nsb):\n self._nSubBands = self._checkAttr(\n key='nSubBands',\n value=nsb,\n name='sub-bands'\n )\n\n\n @property\n def nChannels(self):\n \"\"\"\n \"\"\"\n return self._nChannels\n @nChannels.setter\n @accepts(object, int)\n def nChannels(self, chan):\n self._nChannels = self._checkAttr(\n key='nChannels',\n value=chan,\n name='channels'\n )\n\n\n @property\n def nBins(self):\n \"\"\"\n \"\"\"\n return self._nBins\n @nBins.setter\n @accepts(object, int)\n def nBins(self, bins):\n self._nBins = self._checkAttr(\n key='nBins',\n value=bins,\n name='bins'\n )\n\n\n @property\n def nPolars(self):\n \"\"\"\n \"\"\"\n return self._nPolars\n @nPolars.setter\n @accepts(object, int)\n def nPolars(self, np):\n self._nPolars = self._checkAttr(\n key='nPolars',\n value=np,\n name='polarizations'\n )\n\n\n @property\n def timeRes(self):\n \"\"\"\n \"\"\"\n return self._timeRes\n @timeRes.setter\n @accepts(object, (float, int, TimeDelta))\n def timeRes(self, dt):\n if isinstance(dt, TimeDelta):\n dt = dt.sec\n self._timeRes = self._checkAttr(\n key='timeRes',\n value=dt,\n name='time resolution'\n )\n\n\n @property\n def tFold(self):\n \"\"\"\n \"\"\"\n return self._tFold\n @tFold.setter\n @accepts(object, (float, int, TimeDelta))\n def tFold(self, tfold):\n if isinstance(tfold, TimeDelta):\n tfold = tfold.sec\n self._tFold = self._checkAttr(\n key='tFold',\n value=tfold,\n name='pulsar time fold'\n )\n\n\n @property\n def dsTime(self):\n \"\"\" Downsampling, can take values in [1, 2, 4, 8, 16, 32, 64, 128]\n \"\"\"\n return self._dsTime\n @dsTime.setter\n @accepts(object, int)\n def dsTime(self, ds):\n # Check that ds is a power of 2:\n is2pow = (ds & (ds-1) == 0) and ds != 0\n if not is2pow:\n raise ValueError(\n \"`dsTime` takes only power of two integer values.\"\n )\n self._dsTime = self._checkAttr(\n key='dsTime',\n value=ds,\n name='pulsar downsampling'\n )\n\n\n @property\n def nBits(self):\n \"\"\"\n \"\"\"\n return self._nBits\n @nBits.setter\n @accepts(object, int)\n def nBits(self, n):\n # Check that n is a power of 2:\n 
is2pow = (n & (n-1) == 0) and n != 0\n if not is2pow:\n raise ValueError(\n \"`nBits` takes only power of two integer values.\"\n )\n self._nBits = self._checkAttr(\n key='nBits',\n value=n,\n name='bits'\n )\n\n\n @property\n def freqRes(self):\n \"\"\"\n \"\"\"\n return self._freqRes\n @freqRes.setter\n @accepts(object, (float, int, u.Quantity))\n def freqRes(self, df):\n if isinstance(df, u.Quantity):\n df = df.to(u.Hz).value\n self._freqRes = self._checkAttr(\n key='freqRes',\n value=df,\n name='frequency resolution'\n )\n\n\n @property\n def nMAs(self):\n \"\"\"\n \"\"\"\n return self._nMAs\n @nMAs.setter\n @accepts(object, int)\n def nMAs(self, ma):\n self._nMAs = self._checkAttr(\n key='nMAs',\n value=ma,\n name='Mini-Arrays'\n )\n\n\n @property\n def durationSec(self):\n \"\"\"\n \"\"\"\n return self._durationSec\n @durationSec.setter\n @accepts(object, (float, int, TimeDelta), strict=False)\n def durationSec(self, s):\n if isinstance(s, TimeDelta):\n s = s.sec\n self._durationSec = s\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n def _checkAttr(self, key, value, name):\n attrProp = backendProperties[self._backend][key]\n minVal = attrProp['min']\n maxVal = attrProp['max']\n defaultVal = attrProp['default']\n if value > maxVal:\n log.warning(\n \"Maximal value of {0} is {1}. Setting to default '{2}={3}'.\".format(\n name,\n maxVal,\n key,\n defaultVal\n )\n )\n value = defaultVal\n elif value < minVal:\n log.warning(\n \"Minimal value for {0} is {1}. Setting to default '{2}={3}'.\".format(\n name,\n minVal,\n key,\n defaultVal\n )\n )\n value = defaultVal\n return value\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ------------------------- BSTConfig ------------------------- #\n# ============================================================= #\n@doc('*Beamlet Statistics* observation configuration.', 'bst')\nclass BSTConfig(_BackendConfig):\n \n def __init__(self, **kwargs):\n super().__init__(backend='bst', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of a *BST*\n FITS file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import BSTConfig\n >>> bstconf = BSTConfig(\n durationSec=3600\n )\n >>> bstconf.volume\n 21.09375 Mibyte\n\n >>> from nenupy.observation import BSTConfig\n >>> bstconf = BSTConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> bstconf.volume\n XXX Mibyte\n\n .. warning::\n The data volume estimation does not handle\n specificities of the FITS file in which the *BST*\n are stored (in particular metadata and FITS \n architecture). 
Therefore, the volume may be\n underestimated by a few MB.\n\n \"\"\"\n log.debug(str(self))\n nElements = self.nPolars * self.durationSec * self.nSubBands\n return (nElements * float32).to(u.Mibyte)\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.BSTConfig`\n instance in which *BST* observation configuration properties\n are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *BST* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.BSTConfig`\n\n :Example:\n >>> from nenupy.observation import BSTConfig\n >>> bstconf = BSTConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n dbeams = parset.digibeams\n \n # Find out the total duration of observation\n # Loop over the digibeams, as they can be simultaneous\n totalTimes = np.array([])\n for db in dbeams.keys():\n dts = TimeDelta(\n np.arange(dbeams[db]['duration']),\n format='sec'\n )\n dbTimes = dbeams[db]['startTime'] + dts\n totalTimes = np.union1d(totalTimes, dbTimes.jd)\n \n return BSTConfig(\n durationSec=totalTimes.size,\n startTime=parset.observation['startTime']\n )\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- NICKELConfig ------------------------ #\n# ============================================================= #\n@doc('*NICKEL* correlator observation configuration.', 'nickel')\nclass NICKELConfig(_BackendConfig):\n\n def __init__(self, **kwargs):\n super().__init__(backend='nickel', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of a *NICKEL*\n Measurement Set.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import NICKELConfig\n >>> nriconf = NICKELConfig(\n nMAs=56,\n nSubBands=244,\n nChannels=64,\n timeRes=1,\n durationSec=3600\n )\n >>> nriconf.volume.to('Tibyte')\n 2.6112914 Tibyte\n\n >>> from nenupy.observation import NICKELConfig\n >>> nriconf = NICKELConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> nriconf.volume\n XXX Gibyte\n\n .. 
warning::\n The data volume estimation does not handle\n specificities of the Measurement Set.\n Therefore, the volume may be\n underestimated.\n\n \"\"\"\n log.debug(str(self))\n nBaselines = self.nMAs * (self.nMAs - 1)/2 + self.nMAs\n visVolume = self.nPolars * nBaselines * complex64\n ratePerSB = visVolume * self.nChannels / self.timeRes\n ratePerObs = ratePerSB * self.nSubBands\n return (ratePerObs * self.durationSec).to(u.Gibyte)\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.NICKELConfig`\n instance in which *NICKEL* observation configuration properties\n are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *NICKEL* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.NICKELConfig`\n\n :Example:\n >>> from nenupy.observation import NICKELConfig\n >>> nriconf = NICKELConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n anabeams = parset.anabeams\n\n if 'nri_receivers' not in out.keys():\n # Nickel receiver has not been used\n return NICKELConfig(\n startTime=parset.observation['startTime']\n )\n elif 'nickel' not in out['nri_receivers']:\n # Nickel receiver has not been used\n return NICKELConfig(\n startTime=parset.observation['startTime']\n )\n\n # Hypothesis that only one analog beam is used!\n return NICKELConfig(\n durationSec=anabeams[0]['duration'],\n timeRes=out['nri_dumpTime'],\n nSubBands=len(out['nri_subbandList']),\n nChannels=out['nri_channelization'],\n nMAs=len(anabeams[0]['maList']),\n startTime=parset.observation['startTime']\n )\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# --------------------- _UnDySPuTeDConfig --------------------- #\n# ============================================================= #\nclass _UnDySPuTeDConfig(_BackendConfig):\n \"\"\"\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, backend, **kwargs):\n super().__init__(backend=backend, **kwargs)\n\n\n def __str__(self):\n className = self.__class__.__name__\n title = \"Backend configuration of type '{}'\\n\".format(className)\n properties = ''\n for i, beam in enumerate(self._beamConfigs):\n attributes = backendProperties[beam._backend].keys()\n properties += '\\tBeam {} Properties: '.format(i)\n for att in attributes:\n properties += \"'{}={}', \".format(att, getattr(beam, att))\n properties += \"'{}={}', \\n\".format('durationSec', getattr(beam, 'durationSec'))\n properties = properties[:-4] # remove the last line skip and coma\n return title + properties\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n def _parseParameters(self, parameters, pulsar=False):\n \"\"\" Parse values from the digital beam 'parameters'\n entry.\n E.g. 
'TF: DF=3.05 DT=10.0 HAMM'\n \"\"\"\n parameters = parameters.lower()\n mode = parameters.split(':')[0]\n if pulsar:\n configs = {\n param.split('=')[0]: param.split('=')[1]\\\n for param in parameters.split('--')\\\n if '=' in param\n }\n configs.update({\n param.rstrip(): True\\\n for param in parameters.split('--')\\\n if '=' not in param\n })\n else:\n configs = {\n param.split('=')[0]: param.split('=')[1]\\\n for param in parameters.split()\\\n if '=' in param\n }\n configs.update({\n param.rstrip(): True\\\n for param in parameters.split('--')\\\n if '=' not in param\n })\n return mode, configs\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n @staticmethod\n @accepts((float, int, u.Quantity), (float, int, u.Quantity))\n def _checkDFvsDT(dt, df):\n \"\"\" Short time resolutions are impossible for narrow\n frequency resolutions. Some df/dt combinations are\n therefore not allowed.\n \"\"\"\n if isinstance(dt, u.Quantity):\n dt = dt.to(u.s).value\n if isinstance(df, u.Quantity):\n df = df.to(u.Hz).value\n\n allowedFftlen = 2**( np.arange(8) + 4 )\n allowedNfft2int = 2**(np.arange(9) + 2)\n \n # Find the closest fftlen to the desired df value\n fftlen = allowedFftlen[\n np.argmin(\n np.abs(allowedFftlen - 1.0 / 5.12e-6 / df) # 1/(5.12e-6 s) = 195312.5 Hz\n )\n ]\n\n # Find the closest nfft2int to the desired df value\n nfft2int = allowedNfft2int[\n np.argmin(\n np.abs(allowedNfft2int - dt / (5.12e-6 * fftlen))\n )\n ]\n\n dtEff = 5.12e-6 * fftlen * nfft2int\n dfEff = 1.0 / 5.12e-6 / fftlen\n\n log.debug(\n \"'freqRes={0:.2f}', 'timeRes={1:.2f}' <--> 'df={2:.2f}', 'dt={3:.2f}' ('fftlen={4}', 'nfft2int={5}')\".format(\n (df*u.Hz).to(u.kHz),\n (dt*u.s).to(u.ms),\n (dfEff*u.Hz).to(u.kHz),\n (dtEff*u.s).to(u.ms),\n fftlen,\n nfft2int\n )\n )\n\n return dtEff, dfEff\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- _TFBeamConfig ----------------------- #\n# ============================================================= #\nclass _TFBeamConfig(_BackendConfig):\n \"\"\"\n .. 
versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(backend='tf', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\"\n \"\"\"\n log.debug(str(self))\n ratePerSB = self.nPolars * float32 * (200.e6/1024./self.freqRes) / self.timeRes\n rateObs = ratePerSB * self.nSubBands\n return (rateObs * self.durationSec).to(u.Gibyte)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ------------------------- TFConfig -------------------------- #\n# ============================================================= #\n@doc('*UnDySPuTeD Time-Frequency* mode observation configuration.', 'tf')\nclass TFConfig(_UnDySPuTeDConfig):\n\n def __init__(self, _setFromParset=False, **kwargs):\n if not _setFromParset:\n super().__init__(backend='tf', **kwargs)\n self.timeRes, self.freqRes = self._checkDFvsDT(\n dt=self.timeRes,\n df=self.freqRes\n )\n kwargs['timeRes'] = self.timeRes\n kwargs['freqRes'] = self.freqRes\n self._beamConfigs = [\n _TFBeamConfig(**kwargs)\n ]\n else:\n self._beamConfigs = []\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of an\n *UnDySPuTeD-TF* observation file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import TFConfig\n >>> tfconf = TFConfig(\n nSubBands=500,\n timeRes=42e-3,\n freqRes=200,\n durationSec=3600\n )\n >>> tfconf.volume\n 654.83619 Gibyte\n\n >>> from nenupy.observation import TFConfig\n >>> tfconf = TFConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> tfconf.volume\n XXX Gibyte\n\n .. note::\n Combinations of ``timeRes`` and ``freqRes`` pairs\n are limited to that available within the\n *UnDySPuTeD* receiver. 
If set otherwise, the closest\n allowed values will be used instead.\n\n One can check the corresponding attributes\n after setting up the desired configuration:\n \n >>> tfconf = TFConfig(\n nSubBands=500,\n timeRes=1e-3,\n freqRes=200,\n durationSec=3600\n )\n >>> tfconf.timeRes\n 0.02097152\n >>> tfconf.freqRes\n 190.73486328125\n\n Alternatively, one can set the log to ``DEBUG``\n in order to print conversion details:\n\n >>> import logging\n >>> logging.getLogger('nenupy').setLevel(logging.DEBUG)\n >>> tfconf = TFConfig(\n nSubBands=500,\n timeRes=1e-3,\n freqRes=200,\n durationSec=3600\n )\n 2020-12-16 17:28:52 -- DEBUG: 'freqRes=0.20 kHz', 'timeRes=1.00 ms' <--> 'df=0.19 kHz', 'dt=20.97 ms' ('fftlen=1024', 'nfft2int=4')\n\n \"\"\"\n vol = 0 * u.Gibyte\n for bc in self._beamConfigs:\n vol += bc.volume\n return vol\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (str, Parset))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.TFConfig`\n instance in which *UnDySPuTeD-TF* observation configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *UnDySPuTeD-TF* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.TFConfig`\n\n :Example:\n >>> from nenupy.observation import TFConfig\n >>> tfconf = TFConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n digibeams = parset.digibeams\n\n tf = TFConfig(_setFromParset=True)\n tf.startTime = parset.observation['startTime']\n beamConfigs = []\n \n if 'undysputed' not in out['hd_receivers']:\n # UnDySPuTeD receiver has not been used\n pass\n else:\n for db in digibeams.keys():\n if digibeams[db]['toDo'].lower() != 'dynamicspectrum':\n continue\n try:\n mode, parameters = tf._parseParameters(digibeams[db]['parameters'])\n except KeyError:\n log.warning(\n \"Parset '{}' has no 'parameters' key.\".format(parset.parset)\n )\n continue\n if mode != 'tf':\n continue\n dt, df = tf._checkDFvsDT(\n dt=(float(parameters['dt'])*u.ms).to(u.s).value,\n df=(float(parameters['df'])*u.kHz).to(u.Hz).value\n )\n beamConfigs.append(\n _TFBeamConfig(\n timeRes=dt,\n freqRes=df,\n durationSec=digibeams[db]['duration'],\n nSubBands=len(digibeams[db]['subbandList'])\n )\n )\n \n tf._beamConfigs = beamConfigs\n return tf\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- _RawBeamConfig ----------------------- #\n# ============================================================= #\nclass _RawBeamConfig(_BackendConfig):\n \"\"\"\n .. 
versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(backend='raw', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\"\n \"\"\"\n log.debug(str(self))\n duration = self.durationSec - 60 # Burning time at start\n nBytes = self.nBits / 8 * u.byte\n rateSB = self.nPolars * nBytes / 5.12e-6\n rateObs = rateSB * self.nSubBands\n return (rateObs * duration).to(u.Gibyte)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ------------------------- RAWConfig ------------------------- #\n# ============================================================= #\n@doc('*UnDySPuTeD Waveform* mode observation configuration.', 'raw')\nclass RAWConfig(_UnDySPuTeDConfig):\n\n def __init__(self, _setFromParset=False, **kwargs):\n if not _setFromParset:\n super().__init__(backend='raw', **kwargs)\n self._beamConfigs = [\n _RawBeamConfig(**kwargs)\n ]\n else:\n self._beamConfigs = []\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of an\n *UnDySPuTeD-RAW* observation file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import RAWConfig\n >>> rawconf = RAWConfig(\n durationSec=3600\n )\n >>> rawconf.volume\n 494.53229 Gibyte\n\n >>> from nenupy.observation import RAWConfig\n >>> rawconf = RAWConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> rawconf.volume\n XXX Gibyte\n\n \"\"\"\n vol = 0 * u.Gibyte\n for bc in self._beamConfigs:\n vol += bc.volume\n return vol\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.RAWConfig`\n instance in which *UnDySPuTeD-RAW* observation configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *UnDySPuTeD-RAW* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.RAWConfig`\n\n :Example:\n >>> from nenupy.observation import RAWConfig\n >>> rawconf = RAWConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n digibeams = parset.digibeams\n\n raw = RAWConfig(_setFromParset=True)\n raw.startTime = parset.observation['startTime']\n\n beamConfigs = []\n \n if 'undysputed' not in out['hd_receivers']:\n # UnDySPuTeD receiver has not been used\n pass\n else:\n for db in digibeams.keys():\n if digibeams[db]['toDo'].lower() != 'waveform':\n continue\n beamConfigs.append(\n _RawBeamConfig(\n durationSec=digibeams[db]['duration'],\n nSubBands=len(digibeams[db]['subbandList'])\n )\n )\n \n raw._beamConfigs = beamConfigs\n return raw\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- _FoldBeamConfig 
---------------------- #\n# ============================================================= #\nclass _FoldBeamConfig(_BackendConfig):\n \"\"\"\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(backend='pulsar_fold', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\"\n \"\"\"\n log.debug(str(self))\n duration = self.durationSec - 60 # Burning time at start\n duration = 0 if duration < 0 else duration\n rateObs = self.nSubBands * self.nPolars * float32 * self.nBins / self.tFold\n return (rateObs * duration).to(u.Gibyte)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- PulsarFoldConfig --------------------- #\n# ============================================================= #\n@doc('*UnDySPuTeD Pulsar-FOLD* mode observation configuration.', 'pulsar_fold')\nclass PulsarFoldConfig(_UnDySPuTeDConfig):\n\n def __init__(self, _setFromParset=False, **kwargs):\n if not _setFromParset:\n super().__init__(backend='pulsar_fold', **kwargs)\n self._beamConfigs = [\n _FoldBeamConfig(**kwargs)\n ]\n else:\n self._beamConfigs = []\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of an\n *UnDySPuTeD Pulsar-FOLD* observation file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import PulsarFoldConfig\n >>> foldconf = PulsarFoldConfig(\n durationSec=3600\n )\n >>> foldconf.volume\n 1.9317667 Gibyte\n\n >>> from nenupy.observation import PulsarFoldConfig\n >>> foldconf = PulsarFoldConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> foldconf.volume\n XXX Gibyte\n\n \"\"\"\n vol = 0 * u.Gibyte\n for bc in self._beamConfigs:\n vol += bc.volume\n return vol\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.PulsarFoldConfig`\n instance in which *UnDySPuTeD Pulsar-FOLD* observation configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *UnDySPuTeD Pulsar-FOLD* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.PulsarFoldConfig`\n\n :Example:\n >>> from nenupy.observation import PulsarFoldConfig\n >>> foldconf = PulsarFoldConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n digibeams = parset.digibeams\n\n fold = PulsarFoldConfig(_setFromParset=True)\n fold.startTime = parset.observation['startTime']\n\n beamConfigs = []\n \n if 'undysputed' not in out['hd_receivers']:\n # UnDySPuTeD receiver has not been used\n pass\n else:\n for db in digibeams.keys():\n if digibeams[db]['toDo'].lower() != 'pulsar':\n continue\n try:\n mode, parameters = fold._parseParameters(\n digibeams[db]['parameters'],\n pulsar=True\n )\n except KeyError:\n log.warning(\n \"Parset '{}' has no 
'parameters' key.\".format(parset.parset)\n )\n continue\n if mode != 'fold':\n continue\n \n props = backendProperties['pulsar_fold']\n \n beamConfigs.append(\n _FoldBeamConfig(\n nSubBands=len(digibeams[db]['subbandList']),\n nPolars=1 if 'onlyi' in parameters else 4,\n tFold=float(parameters.get('tfold', props['tFold']['default'])),\n durationSec=digibeams[db]['duration'],\n nBins=int(parameters.get('nbin', props['nBins']['default']))\n )\n )\n\n fold._beamConfigs = beamConfigs\n return fold\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# -------------------- _WaveolafBeamConfig -------------------- #\n# ============================================================= #\nclass _WaveolafBeamConfig(_BackendConfig):\n \"\"\"\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(backend='pulsar_waveolaf', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\"\n \"\"\"\n log.debug(str(self))\n duration = self.durationSec - 60 # Burning time at start\n duration = 0 if duration < 0 else duration\n rateObs = 451590 * u.byte * self.nSubBands\n return (rateObs * duration).to(u.Gibyte)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- PulsarWaveConfig --------------------- #\n# ============================================================= #\n@doc('*UnDySPuTeD Pulsar-WAVEOLAF* mode observation configuration.', 'pulsar_waveolaf')\nclass PulsarWaveConfig(_UnDySPuTeDConfig):\n\n def __init__(self, _setFromParset=False, **kwargs):\n if not _setFromParset:\n super().__init__(backend='pulsar_waveolaf', **kwargs)\n self._beamConfigs = [\n _WaveolafBeamConfig(**kwargs)\n ]\n else:\n self._beamConfigs = []\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of an\n *UnDySPuTeD Pulsar-WAVEOLAF* observation file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import PulsarWaveConfig\n >>> waveconf = PulsarWaveConfig(\n durationSec=3600\n )\n >>> waveconf.volume\n 285.85707 Gibyte\n\n >>> from nenupy.observation import PulsarWaveConfig\n >>> waveconf = PulsarWaveConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> waveconf.volume\n XXX Gibyte\n\n \"\"\"\n vol = 0 * u.Gibyte\n for bc in self._beamConfigs:\n vol += bc.volume\n return vol\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.PulsarWaveConfig`\n instance in which *UnDySPuTeD Pulsar-WAVEOLAF* observation configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *UnDySPuTeD Pulsar-WAVEOLAF* configuration as defined by the ``parset`` file.\n :rtype: 
:class:`~nenupy.observation.obs_config.PulsarWaveConfig`\n\n :Example:\n >>> from nenupy.observation import PulsarWaveConfig\n >>> waveconf = PulsarWaveConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n digibeams = parset.digibeams\n\n wave = PulsarWaveConfig(_setFromParset=True)\n wave.startTime = parset.observation['startTime']\n\n beamConfigs = []\n \n if 'undysputed' not in out['hd_receivers']:\n # UnDySPuTeD receiver has not been used\n pass\n else:\n for db in digibeams.keys():\n if digibeams[db]['toDo'].lower() != 'pulsar':\n continue\n try:\n mode, parameters = wave._parseParameters(\n digibeams[db]['parameters'],\n pulsar=True\n )\n except KeyError:\n log.warning(\n \"Parset '{}' has no 'parameters' key.\".format(parset.parset)\n )\n continue\n if mode != 'waveolaf':\n continue\n \n props = backendProperties['pulsar_waveolaf']\n \n beamConfigs.append(\n _WaveolafBeamConfig(\n nSubBands=len(digibeams[db]['subbandList']),\n durationSec=digibeams[db]['duration']\n )\n )\n\n wave._beamConfigs = beamConfigs\n return wave\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# --------------------- _SingleBeamConfig --------------------- #\n# ============================================================= #\nclass _SingleBeamConfig(_BackendConfig):\n \"\"\"\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(backend='pulsar_single', **kwargs)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\"\n \"\"\"\n log.debug(str(self))\n duration = self.durationSec - 60 # Burning time at start\n duration = 0 if duration < 0 else duration\n nBytes = self.nBits / 8 * u.byte\n rateObs = self.nSubBands * self.nPolars * nBytes /5.12e-6 / self.dsTime\n return (rateObs * duration).to(u.Gibyte)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# --------------------- PulsarSingleConfig -------------------- #\n# ============================================================= #\n@doc('*UnDySPuTeD Pulsar-SINGLE* mode observation configuration.', 'pulsar_single')\nclass PulsarSingleConfig(_UnDySPuTeDConfig):\n\n def __init__(self, _setFromParset=False, **kwargs):\n if not _setFromParset:\n super().__init__(backend='pulsar_single', **kwargs)\n self._beamConfigs = [\n _SingleBeamConfig(**kwargs)\n ]\n else:\n self._beamConfigs = []\n\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume of an\n *UnDySPuTeD Pulsar-SINGLE* observation file.\n\n :getter: Data volume.\n\n :type: :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import PulsarSingleConfig\n >>> singleconf = PulsarSingleConfig(\n durationSec=3600\n )\n >>> singleconf.volume\n 15.454134 Gibyte\n\n >>> from nenupy.observation import PulsarSingleConfig\n >>> singleconf = PulsarSingleConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> singleconf.volume\n XXX Gibyte\n\n \"\"\"\n vol = 0 * u.Gibyte\n for bc in self._beamConfigs:\n vol += 
bc.volume\n return vol\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.PulsarSingleConfig`\n instance in which *UnDySPuTeD Pulsar-SINGLE* observation configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *UnDySPuTeD Pulsar-SINGLE* configuration as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.PulsarSingleConfig`\n\n :Example:\n >>> from nenupy.observation import PulsarSingleConfig\n >>> waveconf = PulsarSingleConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n out = parset.output\n digibeams = parset.digibeams\n\n single = PulsarSingleConfig(_setFromParset=True)\n single.startTime = parset.observation['startTime']\n\n beamConfigs = []\n \n if 'undysputed' not in out['hd_receivers']:\n # UnDySPuTeD receiver has not been used\n pass\n else:\n for db in digibeams.keys():\n if digibeams[db]['toDo'].lower() != 'pulsar':\n continue\n try:\n mode, parameters = single._parseParameters(\n digibeams[db]['parameters'],\n pulsar=True\n )\n except KeyError:\n log.warning(\n \"Parset '{}' has no 'parameters' key.\".format(parset.parset)\n )\n continue\n if mode != 'single':\n continue\n \n props = backendProperties['pulsar_single']\n\n beamConfigs.append(\n _SingleBeamConfig(\n nSubBands=len(digibeams[db]['subbandList']),\n nPolars=1 if 'onlyi' in parameters else 4,\n dsTime=int(parameters.get('dstime', props['dsTime']['default'])),\n durationSec=digibeams[db]['duration'],\n nBits=int(parameters.get('nbits', props['nBits']['default']))\n )\n )\n\n single._beamConfigs = beamConfigs\n return single\n# ============================================================= #\n# ============================================================= #\n\n\nbackendClasses = {\n 'nickel': NICKELConfig,\n 'raw': RAWConfig,\n 'tf': TFConfig,\n 'bst': BSTConfig,\n # 'sst': '',\n # 'xst': '',\n 'pulsar_fold': PulsarFoldConfig,\n 'pulsar_waveolaf': PulsarWaveConfig,\n 'pulsar_single': PulsarSingleConfig\n}\n\n\n# ============================================================= #\n# ------------------------- ObsConfig ------------------------- #\n# ============================================================= #\nclass ObsConfig(object):\n \"\"\" Main observation configuration class.\n\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self):\n for key in backendClasses:\n setattr(self, key, [])\n\n\n def __add__(self, other):\n if not isinstance(other, self.__class__):\n raise TypeError('{} expected.'.format(self.__class__))\n\n new = ObsConfig()\n for key in backendClasses:\n summedVal = getattr(self, key) + getattr(other, key)\n setattr(new, key, summedVal)\n return new\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def volume(self):\n \"\"\" Computes an estimation of the data volume output for\n all the *NenuFAR* receivers. 
If the object\n :class:`~nenupy.observation.obs_config.ObsConfig` has been\n set with several parset files (with the method\n :meth:`~nenupy.observation.obs_config.ObsConfig.fromParsetList`),\n the volumes are summed over all observations.\n\n :getter: Data volume.\n\n :type: `dict` of :class:`~astropy.units.Quantity`\n\n :Example:\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParset(\n 'nenufar_obs.parset'\n )\n >>> obsconf.volume\n {'nickel': <Quantity 0. Gibyte>,\n 'raw': <Quantity 0. Gibyte>,\n 'tf': <Quantity 0. Gibyte>,\n 'bst': <Quantity 20.625 Mibyte>,\n 'pulsar_fold': <Quantity 3.7763691 Gibyte>,\n 'pulsar_waveolaf': <Quantity 558.79404545 Gibyte>,\n 'pulsar_single': <Quantity 0. Gibyte>}\n\n \"\"\"\n volumes = {}\n for key in backendClasses.keys():\n volumes[key] = sum([subconf.volume for subconf in getattr(self, key)])\n return volumes\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n @accepts(type, (Parset, str))\n def fromParset(cls, parset):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.ObsConfig`\n instance in which all *NenuFAR* receiver configuration\n properties are set as defined by the ``parset``.\n\n :param parset:\n Observation parset file.\n :type parset: `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n Full *NenuFAR* receiver configurations as defined by the ``parset`` file.\n :rtype: :class:`~nenupy.observation.obs_config.ObsConfig`\n\n :Example:\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParset('nenufar_obs.parset')\n\n \"\"\"\n if isinstance(parset, str):\n parset = Parset(parset)\n\n dv = ObsConfig()\n for key in backendClasses.keys():\n #setattr(dv, key, backendClasses[key].fromParset(parset))\n getattr(dv, key).append(backendClasses[key].fromParset(parset))\n return dv\n\n\n @classmethod\n @accepts(type, list)\n def fromParsetList(cls, parsetList):\n \"\"\" Returns a :class:`~nenupy.observation.obs_config.ObsConfig`\n instance in which all *NenuFAR* receiver configuration\n properties are set as defined by each parset file contained\n in ``parsetList``.\n\n :param parsetList:\n List of observation parset files.\n :type parsetList: `list` of `str` or :class:`~nenupy.observation.parset.Parset`\n\n :returns:\n *NenuFAR* receiver configurations for all observations defined\n by the parset files listed in ``parsetList``.\n :rtype: :class:`~nenupy.observation.obs_config.ObsConfig`\n\n :Example:\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParsetList(\n ['nenufar_obs_1.parset', 'nenufar_obs_2.parset']\n )\n\n \"\"\"\n if not isinstance(parsetList, list):\n raise TypeError(\n \"`parsetList` should be a `list`.\"\n )\n\n tot = ObsConfig()\n for parset in parsetList:\n obs = ObsConfig.fromParset(parset)\n tot += obs\n\n return tot\n\n\n @accepts(object, str, (str, u.Quantity), strict=False)\n def getCumulativeVolume(self, receiver, unit='Tibyte'):\n \"\"\" Gets an estimation of the cumulative raw data volume\n over time\n computed from the observations listed in the current\n :class:`~nenupy.observation.obs_config.ObsConfig`\n instance for the given ``receiver``.\n\n :param receiver:\n Name of the receiver from which the cumulative data\n volume is estimated.\n :type receiver: `str`\n :param unit:\n Data volume unit in which the cumulative volume\n will be expressed (see also\n `binary unit prefixes 
<https://docs.astropy.org/en/stable/units/standard_units.html#prefixes>`_).\n :type unit: `str` or :class:`~astropy.units.Quantity`\n\n :returns: \n Observation start times and cumulative data volumes.\n :rtype: (:class:`~astropy.time.Time`, :class:`~numpy.ndarray`)\n\n :Example:\n >>> from nenupy.observation import ObsConfig\n >>> obsconf = ObsConfig.fromParsetList(\n ['nenufar_obs_1.parset', 'nenufar_obs_2.parset']\n )\n >>> times, volumes = obsconf.getCumulativeVolume(\n receiver='nickel',\n unit='Gibyte'\n )\n\n \"\"\"\n if receiver not in backendClasses.keys():\n raise ValueError(\n f\"Receiver '{receiver}' not in '{backendClasses.keys()}'\"\n )\n obs_list = getattr(self, receiver)\n times = Time([obs.startTime for obs in obs_list])\n indices = np.argsort(times.mjd)\n times = times[indices]\n volumes = np.array([obs.volume.to(unit).value for obs in obs_list])\n cumVol = np.cumsum(volumes[indices])\n return times, cumVol\n\n\n def plotCumulativeVolume(self, figname='', **kwargs):\n \"\"\" Plots the cumulative raw data volume estimation.\n\n :param figname:\n Figure name to store. If set to ``''`` (by default),\n the figure is only displayed.\n :type figname: `str`\n :param figsize:\n Figure size in inches (default: ``(10, 5)``).\n :type figsize: `tuple`\n :param unit:\n Data volume unit in which the cumulative\n volume will be expressed (see also\n `binary unit prefixes <https://docs.astropy.org/en/stable/units/standard_units.html#prefixes>`_).\n Default is ``'Tibyte'``.\n :type unit: `str` or :class:`~astropy.units.Quantity`\n :param receivers:\n List of receivers whose cumulative data\n volumes must be plotted. Default: all\n available *NenuFAR* receivers.\n :type receivers: `list` of `str`\n :param scale:\n y-axis scaling (``'linear'`` or ``'log'``).\n :type scale: `str`\n :param title:\n Title of the plot.\n :type title: `str`\n :param grid:\n Add a grid to help the visualization. 
Default is ``True``.\n :type grid: `bool`\n :param tMin:\n Minimum time to represent.\n :type tMin: `str` or :class:`~astropy.time.Time`\n :param tMax:\n Maximum time to represent.\n :type tMax: `str` or :class:`~astropy.time.Time`\n\n \"\"\"\n import matplotlib.pylab as plt\n from itertools import cycle\n \n fig = plt.figure(\n figsize=kwargs.get('figsize', (10, 5))\n )\n unit = kwargs.get('unit', 'Tibyte')\n\n receivers = kwargs.get('receivers', list(backendClasses.keys()))\n if not isinstance(receivers, list):\n raise TypeError(\n \"`receivers` must be set as a list.\"\n )\n lStyles = [\n 'solid',\n 'dotted',\n 'dashed',\n 'dashdot',\n (0, (5, 5)), # loose dashed\n (0, (3, 5, 1, 5, 1, 5)), # dashdotteddotted\n (0, (3, 1, 1, 1, 1, 1)) # dense dashdotteddotted\n ]\n linecycler = cycle(lStyles)\n \n volCumDico = {}\n for receiver in receivers:\n times, cumVol = self.getCumulativeVolume(\n receiver=receiver,\n unit=unit\n )\n volCumDico[receiver] = {\n 'times': times,\n 'cumulative_sum': cumVol\n }\n plt.plot(\n times.datetime,\n cumVol,\n label=receiver,\n linewidth=1,\n linestyle=next(linecycler)\n )\n\n plt.plot(\n times.datetime,\n sum([volCumDico[k]['cumulative_sum'] for k in volCumDico.keys()]),\n label='Total',\n color='black',\n linestyle='solid',\n linewidth=2,\n )\n\n plt.yscale(kwargs.get('scale', 'linear'))\n plt.legend()\n plt.xlabel('UTC Time')\n plt.ylabel(f'Raw data volume ({unit})')\n plt.title(kwargs.get('title', ''))\n plt.xlim(\n (\n Time(kwargs.get('tMin', times[0])).datetime,\n Time(kwargs.get('tMax', times[-1])).datetime\n )\n )\n if kwargs.get('grid', True):\n plt.grid()\n \n if figname == '':\n plt.show()\n else:\n plt.savefig(\n figname,\n dpi=300,\n transparent=True,\n bbox_inches='tight'\n )\n# ============================================================= #\n# ============================================================= #\n\n"
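Aside on the code cell above: the least obvious piece of this file is `_UnDySPuTeDConfig._checkDFvsDT`, which snaps a requested (timeRes, freqRes) pair onto the grid the *UnDySPuTeD* backend can actually deliver. A minimal standalone sketch (not part of nenupy; it only reuses the 5.12e-6 s elementary sample period and the power-of-two `fftlen`/`nfft2int` grids hard-coded in the source) reproduces the docstring example:

import numpy as np

# Sketch of _checkDFvsDT: pick the allowed FFT length closest to the
# requested frequency resolution, then the integration factor closest
# to the requested time resolution.
def check_df_vs_dt(dt, df):
    base = 5.12e-6                             # elementary sample period (s)
    allowed_fftlen = 2**(np.arange(8) + 4)     # 16 ... 2048
    allowed_nfft2int = 2**(np.arange(9) + 2)   # 4 ... 1024
    fftlen = allowed_fftlen[np.argmin(np.abs(allowed_fftlen - 1.0 / base / df))]
    nfft2int = allowed_nfft2int[np.argmin(np.abs(allowed_nfft2int - dt / (base * fftlen)))]
    return base * fftlen * nfft2int, 1.0 / base / fftlen

dt_eff, df_eff = check_df_vs_dt(dt=1e-3, df=200.0)
print(dt_eff, df_eff)  # 0.02097152 190.73486328125, i.e. 'fftlen=1024', 'nfft2int=4'

This is why the `TFConfig` docstring warns that requested resolutions are only approximately honoured: 1 ms / 200 Hz is unreachable, and the closest allowed pair is about 20.97 ms / 190.73 Hz.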
] |
[
[
"matplotlib.pylab.show",
"numpy.abs",
"matplotlib.pylab.grid",
"numpy.arange",
"numpy.union1d",
"numpy.cumsum",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.ylabel",
"numpy.float32",
"matplotlib.pylab.legend",
"numpy.argsort",
"matplotlib.pylab.savefig",
"numpy.array",
"numpy.complex64"
]
] |
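The `numpy.float32` entry in the API list above carries the whole beamformed volume accounting: each recorded element is one 4-byte float. A quick sanity check of `BSTConfig.volume` (a sketch using the defaults assumed in the docstring example: 2 polarizations, 768 sub-bands, a one-hour observation):

import numpy as np

# BSTConfig.volume counts nPolars * durationSec * nSubBands float32 elements.
n_polars, n_subbands, duration_sec = 2, 768, 3600
n_bytes = n_polars * duration_sec * n_subbands * np.float32().itemsize
print(n_bytes / 1024**2)  # 21.09375, matching the '21.09375 Mibyte' quoted in the docstring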
LiyuanLucasLiu/FasterTransformer
|
[
"c28149096030286e87491c7648f5a020aed22cc9"
] |
[
"sample/pytorch/encoder_sample.py"
] |
[
"# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport timeit\nimport torch\nimport torch.cuda.nvtx as nvtx\nimport time\n\nfrom utils.encoder import EncoderWeights, CustomEncoder\nimport threading\n\ndef sequence_mask(lengths, max_len=None, is_2d=True):\n batch_size = lengths.numel()\n max_len = max_len or lengths.max()\n mask = (torch.arange(0, max_len, device=lengths.device)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))\n if is_2d:\n return mask\n else:\n mask = mask.view(-1, 1, 1, max_len)\n m2 = mask.transpose(2, 3)\n return mask * m2\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('batch_size', type=int,\n help='batch size')\n parser.add_argument('layer_num', type=int,\n help='number of layers')\n parser.add_argument('seq_len', type=int,\n help='sequence length')\n parser.add_argument('head_num', type=int,\n help='head number')\n parser.add_argument('head_size', type=int,\n help='size per head')\n parser.add_argument('--size_ratio_to_full', type=int, default=1)\n parser.add_argument('--fp16', action='store_true',\n help='is fp16')\n parser.add_argument('--int8_mode', type=int, default=0, metavar='NUMBER',\n help='int8 mode (default: 0)', choices=[0, 1, 2, 3])\n parser.add_argument('--time', action='store_true',\n help='test the time or not.')\n parser.add_argument('--avg_seq_len', type=int, default=-1, metavar='NUMBER',\n help='average sequence length (default: -1)')\n parser.add_argument('--remove_padding', action='store_true',\n help='Remove the padding of sentences of encoder.')\n parser.add_argument('--allow_gemm_test', action='store_true',\n help='Whether allow gemm test inside FT.')\n parser.add_argument('--weight_path', type=str,\n default=None,\n help='path containing the pretrained weights')\n parser.add_argument('--ths_path', type=str, default='./lib/libpyt_fastertransformer.so',\n help='path of the pyt_fastertransformer dynamic lib file')\n parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',\n help='Testing multithread if thread_num > 1.')\n args = parser.parse_args()\n\n batch_size = args.batch_size\n seq_len = args.seq_len\n if args.weight_path is not None:\n if 'large' in args.weight_path:\n layer_num = 24\n head_num = 16\n head_size = 64\n elif 'base' in args.weight_path:\n layer_num = 12\n head_num = 12\n head_size = 64\n else:\n layer_num = args.layer_num\n head_num = args.head_num\n head_size = args.head_size\n else:\n layer_num = args.layer_num\n head_num = args.head_num\n head_size = args.head_size\n hidden_dim = 768 #head_num * head_size\n\n if args.int8_mode == 1:\n per_channel = True\n elif args.int8_mode == 2 or args.int8_mode == 3:\n per_channel = False\n elif args.int8_mode != 0:\n raise ValueError(\"wrong int8_mode argument\")\n\n print(\"\\n=============== Argument ===============\")\n print('batch_size: ' + str(batch_size))\n 
print('layer_num: ' + str(layer_num))\n print('seq_len: ' + str(seq_len))\n print('head_num: ' + str(head_num))\n print('head_size: ' + str(head_size))\n print('hidden_dim: ' + str(hidden_dim))\n print('weight_path: ' + str(args.weight_path))\n print('use_fp16: ' + str(args.fp16))\n print('int8_mode: ' + str(args.int8_mode))\n print('avg_seq_len: ' + str(args.avg_seq_len))\n print('test_time: ' + str(args.time))\n print('remove_padding: ' + str(args.remove_padding))\n print('allow_gemm_test: ' + str(args.allow_gemm_test))\n print('ratio-to-full-bert-base-layer: ' + str(args.size_ratio_to_full))\n print(\"========================================\\n\")\n\n inp = torch.empty(batch_size, seq_len, hidden_dim).cuda()\n torch.nn.init.uniform_(inp, -1, 1)\n mem_seq_lens = torch.randint(1, seq_len+1, (batch_size,), dtype=torch.int32).cuda()\n if args.remove_padding:\n if args.avg_seq_len > 0:\n mem_seq_lens = torch.ones((batch_size,)) * args.avg_seq_len\n mem_seq_lens = mem_seq_lens.to(torch.int32).cuda()\n elif args.avg_seq_len == -1:\n mem_seq_lens = torch.ones((batch_size,)) * seq_len / 2\n mem_seq_lens = mem_seq_lens.to(torch.int32).cuda()\n else:\n raise ValueError(\"wrong avg_seq_len\")\n\n mask = sequence_mask(mem_seq_lens, args.seq_len, False).to(torch.float)\n # mask = torch.randint(0, 2, (batch_size, seq_len, seq_len), dtype=torch.float32).cuda()\n if args.fp16 or args.int8_mode != 0:\n inp = inp.half()\n mask = mask.half()\n\n pretrained_weights = torch.load(args.weight_path) if (args.weight_path is not None) else None\n weights = EncoderWeights(layer_num, hidden_dim, pretrained_weights, size_ratio_to_full=args.size_ratio_to_full)\n\n if args.int8_mode != 0:\n weights.to_int8(per_channel, args.ths_path)\n elif args.fp16:\n weights.to_half()\n weights.to_cuda()\n custom_encoder = CustomEncoder(layer_num, head_num, head_size, weights,\n int8_mode=args.int8_mode,\n remove_padding=False, allow_gemm_test=args.allow_gemm_test,\n path=args.ths_path)\n\n eff_custom_encoder = CustomEncoder(layer_num, head_num, head_size, weights,\n int8_mode=args.int8_mode,\n remove_padding=True, allow_gemm_test=args.allow_gemm_test,\n path=args.ths_path)\n custom_encoder = torch.jit.script(custom_encoder)\n eff_custom_encoder = torch.jit.script(eff_custom_encoder)\n\n with torch.no_grad():\n output_mask = sequence_mask(mem_seq_lens, args.seq_len).to(mask.dtype).unsqueeze(-1)\n\n ft_output = custom_encoder(inp, mask, mem_seq_lens)[0] * output_mask\n # print(ft_output)\n print(ft_output.size())\n\n eff_ft_output = eff_custom_encoder(inp, mask, mem_seq_lens)[0] * output_mask\n # print(eff_ft_output)\n print(eff_ft_output.size())\n\n if args.time:\n iterations = 1000\n\n for i in range(iterations):\n output = custom_encoder(inp, mask, mem_seq_lens)\n t20 = timeit.default_timer()\n # nvtx.range_push(\"ext\")\n for i in range(iterations):\n # nvtx.range_push(\"ext\"+str(i))\n output = custom_encoder(inp, mask, mem_seq_lens)\n # nvtx.range_pop()\n # nvtx.range_pop()\n t2 = timeit.default_timer() - t20\n time.sleep(10)\n\n for i in range(iterations):\n output = eff_custom_encoder(inp, mask, mem_seq_lens)\n t30 = timeit.default_timer()\n # nvtx.range_push(\"eff_ext\")\n for i in range(iterations):\n # nvtx.range_push(\"eff_ext\"+str(i))\n output = eff_custom_encoder(inp, mask, mem_seq_lens)\n # nvtx.range_pop()\n # nvtx.range_pop()\n t3 = timeit.default_timer() - t30\n time.sleep(10)\n print(\"[INFO] FasterTransformer time costs: {:.2f} ms\".format(t2*1000/iterations))\n print(\"[INFO] EFF-FasterTransformer time costs: 
{:.2f} ms\".format(t3*1000/iterations))\n\n if args.thread_num > 1:\n # Multi-threading demonstration\n thread_list = []\n thread_num = args.thread_num\n iterations = 100\n def run():\n t40 = timeit.default_timer()\n for i in range(iterations):\n output = custom_encoder(inp, mask, mem_seq_lens)\n t4 = timeit.default_timer() - t40\n print(\"[INFO] batch_size {} max_seq_len {} {} layer FT-OP-time {:6.2f} ms with {} threads\".format(batch_size,\n seq_len, layer_num, t4, thread_num))\n\n for i in range(thread_num):\n thread_list.append(threading.Thread(target=run, name=\"RunFT\"))\n for t in thread_list:\n t.start()\n for t in thread_list:\n t.join()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.jit.script",
"torch.nn.init.uniform_",
"torch.randint",
"torch.empty",
"torch.ones",
"torch.load",
"torch.no_grad",
"torch.arange"
]
] |
gschivley/ElectricityLCI
|
[
"1c1c1b69705d3ffab1e1e844aaf7379e4f51198e"
] |
[
"example/combined_build.py"
] |
[
"# -*- coding: utf-8 -*-\nimport electricitylci\nimport pandas as pd\nimport pickle as pkl\n\n\nuse_cache = False\nsubregion = \"FERC\"\n\nif use_cache is True:\n with open(\"upstream_dict.pickle\", \"rb\") as handle:\n upstream_dict = pkl.load(handle)\n upstream_df=pd.read_csv(f\"upstream_df.csv\")\n upstream_dict = electricitylci.write_upstream_dicts_to_jsonld(upstream_dict)\n# gen_df = electricitylci.get_alternate_gen_plus_netl()\n# combined_df, canadian_gen = electricitylci.combine_upstream_and_gen_df(gen_df, upstream_df)\n combined_df = pd.read_csv(f\"combined_df.csv\", index_col=0)\n# gen_plus_fuels = electricitylci.add_fuels_to_gen(\n# gen_df, upstream_df, canadian_gen, upstream_dict\n# )\n gen_plus_fuels = pd.read_csv(f\"gen_plus_fuels.csv\", index_col=0)\n aggregate_df = pd.read_csv(f\"aggregate_df.csv\", index_col=0)\n with open(\"aggregate_dict.pickle\", \"rb\") as handle:\n aggregate_dict = pkl.load(handle)\n# aggregate_df = electricitylci.aggregate_gen(gen_plus_fuels, subregion=subregion)\n# aggregate_df.to_csv(f\"aggregate_df.csv\")\n# aggregate_dict = electricitylci.write_gen_fuel_database_to_dict(\n# aggregate_df, upstream_dict, subregion=subregion\n# )\n# with open(\"aggregate_dict.pickle\", \"wb\") as handle:\n# pkl.dump(aggregate_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n aggregate_dict = electricitylci.write_process_dicts_to_jsonld(aggregate_dict)\n gen_mix_df = electricitylci.get_generation_mix_process_df(regions=subregion)\n# gen_mix_df = pd.read_csv(f\"gen_mix_df.csv\",index_col=0)\n gen_mix_dict = electricitylci.write_generation_mix_database_to_dict(gen_mix_df,aggregate_dict,regions=subregion)\n# with open(\"gen_mix_dict.pickle\",\"rb\") as handle:\n# gen_mix_dict = pkl.load(handle)\n gen_mix_dict = electricitylci.write_process_dicts_to_jsonld(gen_mix_dict)\n cons_mix_df = electricitylci.get_consumption_mix_df(subregion=subregion)\n cons_mix_dict = electricitylci.write_consumption_mix_to_dict(cons_mix_df,gen_mix_dict,subregion=subregion)\n cons_mix_dict = electricitylci.write_process_dicts_to_jsonld(cons_mix_dict)\n with open(\"cons_mix_dict.pickle\", \"wb\") as handle:\n pkl.dump(cons_mix_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n dist_mix_df = electricitylci.get_distribution_mix_df(gen_plus_fuels,subregion=subregion)\n dist_mix_dict = electricitylci.write_distribution_mix_to_dict(dist_mix_df,cons_mix_dict,subregion=subregion)\n dist_mix_dict = electricitylci.write_process_dicts_to_jsonld(dist_mix_dict)\n with open(\"dist_mix_dict.pickle\", \"wb\") as handle:\n pkl.dump(dist_mix_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n\n \nelse:\n upstream_df = electricitylci.get_upstream_process_df()\n upstream_df.to_csv(f\"upstream_df.csv\")\n upstream_dict = electricitylci.write_upstream_process_database_to_dict(\n upstream_df\n )\n upstream_dict = electricitylci.write_upstream_dicts_to_jsonld(upstream_dict)\n with open(\"upstream_dict.pickle\", \"wb\") as handle:\n pkl.dump(upstream_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n gen_df = electricitylci.get_alternate_gen_plus_netl()\n # The combined DF below should be the final dataframe for generic analysis\n combined_df, canadian_gen = electricitylci.combine_upstream_and_gen_df(gen_df, upstream_df)\n combined_df.to_csv(f\"combined_df.csv\")\n gen_plus_fuels = electricitylci.add_fuels_to_gen(\n gen_df, upstream_df, canadian_gen, upstream_dict\n )\n gen_plus_fuels.to_csv(f\"gen_plus_fuels.csv\")\n aggregate_df = electricitylci.aggregate_gen(gen_plus_fuels, subregion=subregion)\n 
aggregate_df.to_csv(f\"aggregate_df.csv\")\n aggregate_dict = electricitylci.write_gen_fuel_database_to_dict(\n aggregate_df, upstream_dict, subregion=subregion\n )\n with open(\"aggregate_dict.pickle\", \"wb\") as handle:\n pkl.dump(aggregate_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n aggregate_dict = electricitylci.write_process_dicts_to_jsonld(aggregate_dict)\n gen_mix_df = electricitylci.get_generation_mix_process_df(regions=subregion)\n gen_mix_dict = electricitylci.write_generation_mix_database_to_dict(gen_mix_df,aggregate_dict,regions=subregion)\n gen_mix_dict = electricitylci.write_process_dicts_to_jsonld(gen_mix_dict)\n with open(\"gen_mix_dict.pickle\", \"wb\") as handle:\n pkl.dump(gen_mix_dict, handle, protocol=pkl.HIGHEST_PROTOCOL) \n cons_mix_df = electricitylci.get_consumption_mix_df(subregion=subregion)\n cons_mix_dict = electricitylci.write_consumption_mix_to_dict(cons_mix_df,gen_mix_dict,subregion=subregion)\n cons_mix_dict = electricitylci.write_process_dicts_to_jsonld(cons_mix_dict)\n with open(\"cons_mix_dict.pickle\", \"wb\") as handle:\n pkl.dump(cons_mix_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n dist_mix_df = electricitylci.get_distribution_mix_df(gen_df,subregion=subregion)\n dist_mix_dict = electricitylci.write_distribution_mix_to_dict(dist_mix_df,cons_mix_dict,subregion=subregion)\n dist_mix_dict = electricitylci.write_process_dicts_to_jsonld(dist_mix_dict)\n with open(\"dist_mix_dict.pickle\", \"wb\") as handle:\n pkl.dump(dist_mix_dict, handle, protocol=pkl.HIGHEST_PROTOCOL)\n \n "
] |
[
[
"pandas.read_csv"
]
] |
parshakova/APIP2
|
[
"011c78b97c86c6fc22535acb798c685d2736b7d7"
] |
[
"apip/utils.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\nimport re\nimport os\nimport sys\nimport random\nimport string\nimport logging\n\nimport argparse, msgpack\nimport torch\nimport logging\nimport pickle\nimport pandas as pd\nfrom collections import Counter\n\nimport torch\nimport numpy as np\nfrom itertools import compress\n\n# Modification: remove unused functions and imports, add a boolean parser.\n# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa\n\n# ------------------------------------------------------------------------------\n# General logging utilities.\n# ------------------------------------------------------------------------------\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value.\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum = self.sum + val * n\n self.count = self.count + n\n self.avg = self.sum / self.count\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef setup_logger(name, log_file):\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d %I:%M')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n log.addHandler(fh)\n log.addHandler(ch)\n return log\n\ndef add_arguments(parser):\n # system\n parser.add_argument('--log_file', default='output.log',\n help='path for log file.')\n parser.add_argument('--log_per_updates', type=int, default=400,\n help='log model loss per x updates (mini-batches).')\n parser.add_argument('--data_file', default='SQuAD/data.msgpack',\n help='path to preprocessed data file.')\n parser.add_argument('--model_dir', default='apip_models',\n help='path to store saved models.')\n parser.add_argument('--save_last_only', action='store_true',\n help='only save the final models.')\n parser.add_argument('--eval_per_epoch', type=int, default=1,\n help='perform evaluation per x epochs.')\n parser.add_argument('--seed', type=int, default=937,\n help='random seed for data shuffling, dropout, etc.')\n parser.add_argument(\"--cuda\", type=str2bool, nargs='?',\n const=True, default=torch.cuda.is_available(),\n help='whether to use GPU acceleration.')\n # training\n parser.add_argument('-e', '--epochs', type=int, default=50)\n parser.add_argument('-bs', '--batch_size', type=int, default=32)\n parser.add_argument('-rs', '--resume', default='',\n help='previous model file name (in `model_dir`). '\n 'e.g. \"checkpoint_epoch_11.pt\"')\n parser.add_argument('-rd', '--restore_dir', default='',\n help='previous model file name (in `model_dir`). '\n 'e.g. 
\"checkpoint_epoch_11.pt\"')\n parser.add_argument('-ro', '--resume_options', action='store_true',\n help='use previous model options, ignore the cli and defaults.')\n parser.add_argument('-rlr', '--reduce_lr', type=float, default=0.,\n help='reduce initial (resumed) learning rate by this factor.')\n parser.add_argument('-op', '--optimizer', default='adamax',\n help='supported optimizer: adamax, sgd')\n parser.add_argument('-gc', '--grad_clipping', type=float, default=20)\n parser.add_argument('-wd', '--weight_decay', type=float, default=0.)\n parser.add_argument('-lr', '--learning_rate', type=float, default=0.001,\n help='only applied to SGD.')\n parser.add_argument('-mm', '--momentum', type=float, default=0,\n help='only applied to SGD.')\n parser.add_argument('-tp', '--tune_partial', type=int, default=1000,\n help='finetune top-x embeddings.')\n parser.add_argument('--fix_embeddings', action='store_true',\n help='if true, `tune_partial` will be ignored.')\n parser.add_argument('--rnn_padding', action='store_true',\n help='perform rnn padding (much slower but more accurate).')\n # different modules\n parser.add_argument('--squad', default=1, type=int,help='SQuAD type: 1.0 or 2.0')\n \n parser.add_argument('--pi_inp', default='rnn_cat', help='input into latent policy =rnn_cat=, =mix=')\n parser.add_argument('--ae_restore', default='apip_models/08m06d_151643/best_model.pt', help='restore file for answer exist policy')\n parser.add_argument('--ae_archt', default='policy_con_4', help='=bili=, =policy=')\n parser.add_argument('--select_i', action='store_true',help='if true, train selection policy for scoring good on SQuAD')\n parser.add_argument('--vae', action='store_true',help='if true, use vae for gradient estimation')\n parser.add_argument('--interpret', action='store_true',help='if true, use induced interpretation values for testing')\n parser.add_argument('--rl_tuning', default='', type=str, help='=pgm=, =pg=, =sc=')\n parser.add_argument('--policy_critic', default='4_3', type=str, help='number of layers in MLP for policy and critic networks')\n parser.add_argument('--debug', action='store_true',help='if true, debug')\n parser.add_argument('--entropy_loss', default=0.0, type=float ,help='if true, use entropy loss')\n parser.add_argument('--batch_norm', action='store_true',help='if true, use BN')\n parser.add_argument('--summary', default=True, type=bool, help='if true, make summaries')\n parser.add_argument('--all_emb_tune', action='store_true',help='if true, tune all embs')\n parser.add_argument('--semisup', action='store_true',help='if true, use semi supervised learning')\n parser.add_argument('--weight_norm', action='store_true',help='if true, use WN')\n parser.add_argument('--drop_nn', action='store_true',help='if true, use dropouts')\n parser.add_argument('--critic_loss', action='store_true',\n help='if true, use optimize for f1 prediction')\n parser.add_argument('--self_critic', action='store_true',\n help='if true, use policy gradient for span prediction')\n parser.add_argument('--n_actions', type=int, default=0)\n parser.add_argument('--control_d', default='', help='=q_dc= or or =q_wa= or =d_qa= or =d_ca=')\n parser.add_argument('--fin_att', default='linear', help='=linear= or =param= or =pointer_s=')\n parser.add_argument('--gate', default='tanh_1', help='=tanh= or =fc=')\n parser.add_argument('--rl_start', type=float, default=float('inf'))\n parser.add_argument('--question_merge', default='self_attn')\n parser.add_argument('--beta', default='const_1')\n 
parser.add_argument('--alpha', default='const_1')\n parser.add_argument('--pi_q_rnn', default='', help='rnns to update =pi= or =q= or =pi_q=')\n parser.add_argument('--ce_frac', type=float, default=0.9)\n parser.add_argument('-gpp','--grad_prob_print', type=float, default=0.00001)\n parser.add_argument('--dropout_rate', type=float, default=0.1)\n parser.add_argument('--ae_coeff', type=float, default=1.0)\n\n # model\n parser.add_argument('--doc_layers', type=int, default=5)\n parser.add_argument('--question_layers', type=int, default=5)\n parser.add_argument('--hidden_size', type=int, default=128)\n parser.add_argument('--num_features', type=int, default=4)\n parser.add_argument('--pos', type=str2bool, nargs='?', const=True, default=True,\n help='use pos tags as a feature.')\n parser.add_argument('--pos_size', type=int, default=56,\n help='how many kinds of POS tags.')\n parser.add_argument('--pos_dim', type=int, default=56,\n help='the embedding dimension for POS tags.')\n parser.add_argument('--ner', type=str2bool, nargs='?', const=True, default=True,\n help='use named entity tags as a feature.')\n parser.add_argument('--ner_size', type=int, default=19,\n help='how many kinds of named entity tags.')\n parser.add_argument('--ner_dim', type=int, default=19,\n help='the embedding dimension for named entity tags.')\n parser.add_argument('--use_qemb', type=str2bool, nargs='?', const=True, default=True)\n parser.add_argument('--concat_rnn_layers', type=str2bool, nargs='?',\n const=True, default=False)\n parser.add_argument('--dropout_emb', type=float, default=0.5)\n parser.add_argument('--dropout_rnn', type=float, default=0.2)\n parser.add_argument('--dropout_rnn_output', type=str2bool, nargs='?',\n const=True, default=True)\n parser.add_argument('--max_len', type=int, default=15)\n parser.add_argument('--rnn_type', default='sru',\n help='supported types: rnn, gru, lstm')\n return parser\n\n\ndef lr_decay(optimizer, lr_decay, log):\n for param_group in optimizer.param_groups:\n param_group['lr'] *= lr_decay\n log.info('[learning rate reduced by {}]'.format(lr_decay))\n return optimizer\n\ndef select_scope_update(args, epoch):\n # use VAE framework until time rl_start and the switch to RL framework\n if args.rl_start > epoch:\n scope = 'pi_q'\n elif args.rl_start <= epoch:\n if args.rl_start == epoch:\n print(\"\\nSTARTED RL UPDATES\\n\")\n scope = \"rl\"\n return scope\n\ndef load_data(opt, args):\n # return train and development(test) sets\n # max q len = 60\n # max c len = 767\n if opt['squad'] == 1:\n squad_dir = 'SQuAD'\n else:\n squad_dir = 'SQuAD2'\n\n with open(os.path.join(squad_dir, 'meta.msgpack'), 'rb') as f:\n meta = msgpack.load(f, encoding='utf8')\n embedding = torch.Tensor(meta['embedding'])\n opt['pretrained_words'] = True\n opt['vocab_size'] = embedding.size(0)\n opt['embedding_dim'] = embedding.size(1)\n if not opt['fix_embeddings']:\n embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)\n with open(args.data_file, 'rb') as f:\n data = msgpack.load(f, encoding='utf8')\n if args.semisup:\n with open(os.path.join(squad_dir, 'q_labels_sm5.pickle'), 'rb') as f:\n q_labels = pickle.load(f, encoding='utf8')\n print(\"loading question labels for %d actions\"%args.n_actions)\n q_l, ql_mask = q_labels[args.n_actions]\n else:\n q_l, ql_mask = [0]*len(data['trn_question_ids']), [0]*len(data['trn_question_ids'])\n train_orig = pd.read_csv(os.path.join(squad_dir, 'train.csv'))\n train = list(zip(\n data['trn_context_ids'],\n data['trn_context_features'],\n 
data['trn_context_tags'],\n data['trn_context_ents'],\n data['trn_question_ids'],\n train_orig['answer_start_token'].tolist(),\n train_orig['answer_end_token'].tolist(),\n data['trn_ans_exists'],\n data['trn_context_text'],\n data['trn_context_spans']\n ))\n train_y = train_orig['answer'].tolist()[:len(train)]\n train_y = [[y] for y in train_y]\n dev = list(zip(\n data['dev_context_ids'],\n data['dev_context_features'],\n data['dev_context_tags'],\n data['dev_context_ents'],\n data['dev_question_ids'],\n data['dev_ans_exists'],\n data['dev_context_text'],\n data['dev_context_spans']\n ))\n if not 'data2' in args.data_file and not 'data_a' in args.data_file:\n dev_orig = pd.read_csv(os.path.join(squad_dir, 'dev.csv'))\n dev_y = dev_orig['answers'].tolist()[:len(dev)]\n dev_y = [eval(y) for y in dev_y]\n else:\n dev_y = data['dev_answers']\n return train, dev, dev_y, train_y, embedding, opt, q_l, ql_mask\n\n\ndef load_data_train(opt, args):\n # return train and validation sets\n # max q len = 60\n # max c len = 767\n if opt['squad'] == 1:\n squad_dir = 'SQuAD'\n else:\n squad_dir = 'SQuAD2'\n\n with open(os.path.join(squad_dir, 'meta.msgpack'), 'rb') as f:\n meta = msgpack.load(f, encoding='utf8')\n embedding = torch.Tensor(meta['embedding'])\n opt['pretrained_words'] = True\n opt['vocab_size'] = embedding.size(0)\n opt['embedding_dim'] = embedding.size(1)\n if not opt['fix_embeddings']:\n embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)\n with open(args.data_file, 'rb') as f:\n data = msgpack.load(f, encoding='utf8')\n if args.semisup:\n with open(os.path.join(squad_dir, 'q_labels_sm5.pickle'), 'rb') as f:\n q_labels = pickle.load(f, encoding='utf8')\n print(\"loading question labels for %d actions\"%args.n_actions)\n q_l, ql_mask = q_labels[args.n_actions]\n else:\n q_l, ql_mask = [0]*len(data['trn_question_ids']), [0]*len(data['trn_question_ids'])\n train_orig = pd.read_csv(os.path.join(squad_dir, 'train.csv'))\n train = list(zip(\n data['trn_context_ids'],\n data['trn_context_features'],\n data['trn_context_tags'],\n data['trn_context_ents'],\n data['trn_question_ids'],\n train_orig['answer_start_token'].tolist(),\n train_orig['answer_end_token'].tolist(),\n data['trn_ans_exists'],\n data['trn_context_text'],\n data['trn_context_spans']\n ))\n train_y = train_orig['answer'].tolist()[:len(train)]\n train_y = [[y] for y in train_y]\n dev = list(zip(\n data['val_context_ids'],\n data['val_context_features'],\n data['val_context_tags'],\n data['val_context_ents'],\n data['val_question_ids'],\n data['val_ans_exists'],\n data['val_context_text'],\n data['val_context_spans']\n ))\n if not 'data2' in args.data_file and not 'data_a' in args.data_file:\n dev_orig = pd.read_csv(os.path.join(squad_dir, 'valid.csv'))\n dev_y = dev_orig['answers'].tolist()[:len(dev)]\n dev_y = [eval(y) for y in dev_y]\n else:\n dev_y = data['dev_answers']\n return train, dev, dev_y, train_y, embedding, opt, q_l, ql_mask\n\n\nclass BatchGen:\n def __init__(self, data, batch_size, gpu, evaluation=False, shuffle=False):\n '''\n input:\n data - list of lists\n batch_size - int\n '''\n self.batch_size = batch_size\n self.eval = evaluation\n self.gpu = gpu\n\n # shuffle\n if not evaluation or shuffle:\n indices = list(range(len(data)))\n random.shuffle(indices)\n data = [data[i] for i in indices]\n self.indices = [indices[i:i + batch_size] for i in range(0, len(data), batch_size)]\n else:\n indices = list(range(len(data)))\n self.indices = [indices[i:i + batch_size] for i in range(0, len(data), 
batch_size)]\n \n # chunk into batches\n data = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n for batch in self.data:\n batch_size = len(batch)\n batch = list(zip(*batch))\n if self.eval:\n #assert len(batch) == 7\n pass\n else:\n assert len(batch) == 10\n\n context_len = max(len(x) for x in batch[0])\n context_id = torch.LongTensor(batch_size, context_len).fill_(0)\n for i, doc in enumerate(batch[0]):\n context_id[i, :len(doc)] = torch.LongTensor(doc)\n\n feature_len = len(batch[1][0][0])\n context_feature = torch.Tensor(batch_size, context_len, feature_len).fill_(0)\n for i, doc in enumerate(batch[1]):\n for j, feature in enumerate(doc):\n context_feature[i, j, :] = torch.Tensor(feature)\n\n context_tag = torch.LongTensor(batch_size, context_len).fill_(0)\n for i, doc in enumerate(batch[2]):\n context_tag[i, :len(doc)] = torch.LongTensor(doc)\n\n context_ent = torch.LongTensor(batch_size, context_len).fill_(0)\n for i, doc in enumerate(batch[3]):\n context_ent[i, :len(doc)] = torch.LongTensor(doc)\n question_len = max(len(x) for x in batch[4])\n question_id = torch.LongTensor(batch_size, question_len).fill_(0)\n for i, doc in enumerate(batch[4]):\n question_id[i, :len(doc)] = torch.LongTensor(doc)\n\n context_mask = torch.eq(context_id, 0)\n question_mask = torch.eq(question_id, 0)\n if not self.eval:\n y_s = torch.LongTensor(batch[5])\n y_e = torch.LongTensor(batch[6])\n\n exists = torch.FloatTensor(batch[-3])\n text = list(batch[-2])\n span = list(batch[-1])\n if self.gpu:\n context_id = context_id.pin_memory()\n context_feature = context_feature.pin_memory()\n context_tag = context_tag.pin_memory()\n context_ent = context_ent.pin_memory()\n context_mask = context_mask.pin_memory()\n question_id = question_id.pin_memory()\n question_mask = question_mask.pin_memory()\n if self.eval:\n yield (context_id, context_feature, context_tag, context_ent, context_mask,\n question_id, question_mask, exists, text, span)\n else:\n yield (context_id, context_feature, context_tag, context_ent, context_mask,\n question_id, question_mask, y_s, y_e, exists, text, span)\n\n\ndef _normalize_answer(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef _exact_match(pred, answers):\n if pred is None or answers is None:\n return False\n pred = _normalize_answer(pred)\n if pred == \"\" and (len(set(answers))==0 or len(set(answers[0]))==0 or answers[0]=='nan'):\n return True\n for a in answers:\n if pred == _normalize_answer(a):\n return True\n return False\n\n\ndef _f1_score(pred, answers):\n def _score(p_tokens, a_tokens):\n common = Counter(p_tokens) & Counter(a_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1. * num_same / len(p_tokens)\n recall = 1. 
* num_same / len(a_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n if pred is None or answers is None:\n return 0\n if len(set(answers))==0 or len(set(answers[0]))==0 or answers[0]=='nan':\n if pred == \"\":\n return 1\n else:\n return 0\n p_tokens = _normalize_answer(pred).split()\n scores = [_score(p_tokens, _normalize_answer(a).split()) for a in answers]\n return max(scores)\n\n\ndef _overlap_score(pred, answers):\n def _score(p_tokens, a_tokens):\n common = Counter(p_tokens) & Counter(a_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n recall = 1. * num_same / len(a_tokens)\n return recall\n\n if pred is None or answers is None:\n return 0\n if len(set(answers))==0 or len(set(answers[0]))==0 or answers[0]=='nan':\n if pred == \"\":\n return 1\n else:\n return 0\n p_tokens = _normalize_answer(pred).split()\n scores = [_score(p_tokens, _normalize_answer(a).split()) for a in answers]\n return max(scores)\n\n\ndef overlap(pred, truth):\n assert len(pred) == len(truth)\n ov = 0\n for p, t in zip(pred, truth):\n ov += _overlap_score(p, t)\n return ov\n\n\ndef score_test_alli(pred, truth):\n assert len(pred) == len(truth), \"pred = %d, truth = %d\"%(len(pred), len(truth))\n f1 = em = total = 0\n for p, t in zip(pred, truth):\n em += _exact_match(p, t)\n f1 += _f1_score(p, t)\n return em, f1\n\ndef score(pred, truth):\n assert len(pred) == len(truth), \"pred = %d, truth = %d\"%(len(pred), len(truth))\n f1 = em = total = 0\n for p, t in zip(pred, truth):\n total += 1\n em += _exact_match(p, t)\n f1 += _f1_score(p, t)\n em = 100. * em / total\n f1 = 100. * f1 / total\n return em, f1\n\ndef score_list(pred, truth, mask):\n assert len(pred) == len(truth), \"pred = %d, truth = %d\"%(len(pred), len(truth))\n f1 = []; em = []; \n for p, t in zip(pred, truth):\n em += [_exact_match(p, t)]\n f1 += [_f1_score(p, t)]\n print(sum(1-np.array(mask)), (np.array(f1).squeeze()==1).sum(), ((np.array(mask).squeeze()==0)*(np.array(f1).squeeze()==1)).sum(),\\\n ((np.array(mask).squeeze()==0)*(np.array(em).squeeze()==1)).sum())\n em = 100. * (np.array(mask).squeeze()*np.array(em)).sum() / sum(mask)\n f1 = 100. 
* (np.array(mask).squeeze()*np.array(f1)).sum() / sum(mask)\n return em, f1\n\ndef score_sc(pred_s, pred_m, truth):\n f1_s, f1_m = [], []\n if pred_s:\n assert len(pred_s) == len(truth)\n for ps, pm, t in zip(pred_s, pred_m, truth):\n f1_s += [_f1_score(ps, t)]\n f1_m += [_f1_score(pm, t)]\n else:\n for pm, t in zip(pred_m, truth):\n f1_m += [_f1_score(pm, t)]\n return np.array(f1_s), np.array(f1_m)\n\ndef score_em(pred_s, pred_m, truth):\n f1_s, f1_m = [], []\n if pred_s:\n assert len(pred_s) == len(truth)\n for ps, pm, t in zip(pred_s, pred_m, truth):\n f1_s += [_exact_match(ps, t)]\n f1_m += [_exact_match(pm, t)]\n else:\n for pm, t in zip(pred_m, truth):\n f1_m += [_exact_match(pm, t)]\n return np.array(f1_s), np.array(f1_m).astype(np.float32)\n\ndef load_data_ae_1(opt, args):\n # max q len = 60\n # max c len = 767\n squad_dir = 'SQuAD2'\n\n with open(os.path.join(squad_dir, 'meta.msgpack'), 'rb') as f:\n meta = msgpack.load(f, encoding='utf8')\n embedding = torch.Tensor(meta['embedding'])\n opt['pretrained_words'] = True\n opt['vocab_size'] = embedding.size(0)\n opt['embedding_dim'] = embedding.size(1)\n if not opt['fix_embeddings']:\n embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)\n with open('SQuAD2/data.msgpack', 'rb') as f:\n data = msgpack.load(f, encoding='utf8')\n if args.semisup:\n with open(os.path.join(squad_dir, 'q_labels_sm5.pickle'), 'rb') as f:\n q_labels = pickle.load(f, encoding='utf8')\n print(\"loading question labels for %d actions\"%args.n_actions)\n q_l, ql_mask = q_labels[args.n_actions]\n else:\n q_l, ql_mask = [0]*len(data['trn_question_ids']), [0]*len(data['trn_question_ids'])\n train_orig = pd.read_csv(os.path.join(squad_dir, 'train.csv'))\n\n def filter_list(x, mask):\n return list(compress(x, mask))\n\n m1 = data['trn_ans_exists'].copy()\n m2 = data['dev_ans_exists'].copy()\n\n data['trn_context_ids'] = filter_list(data['trn_context_ids'], m1)\n data['trn_context_features'] = filter_list(data['trn_context_features'], m1)\n data['trn_context_tags'] = filter_list(data['trn_context_tags'], m1)\n data['trn_context_ents'] = filter_list(data['trn_context_ents'], m1)\n data['trn_question_ids'] = filter_list(data['trn_question_ids'], m1)\n data['trn_context_text'] = filter_list(data['trn_context_text'], m1)\n data['trn_context_spans'] = filter_list(data['trn_context_spans'], m1)\n data['trn_ans_exists'] = [1]*sum(m1)\n answer_start_token = filter_list(train_orig['answer_start_token'].tolist(), m1)\n answer_end_token = filter_list(train_orig['answer_end_token'].tolist(), m1)\n q_l = filter_list(q_l, m1)\n ql_mask = filter_list(ql_mask, m1)\n\n\n data['dev_context_ids'] = filter_list(data['dev_context_ids'], m2)\n data['dev_context_features'] = filter_list(data['dev_context_features'], m2)\n data['dev_context_tags'] = filter_list(data['dev_context_tags'], m2)\n data['dev_context_ents'] = filter_list(data['dev_context_ents'], m2)\n data['dev_question_ids'] = filter_list(data['dev_question_ids'], m2)\n data['dev_context_text'] = filter_list(data['dev_context_text'], m2)\n data['dev_context_spans'] = filter_list(data['dev_context_spans'], m2)\n data['dev_ans_exists'] = [1]*sum(m2)\n\n train = list(zip(\n data['trn_context_ids'],\n data['trn_context_features'],\n data['trn_context_tags'],\n data['trn_context_ents'],\n data['trn_question_ids'],\n answer_start_token,\n answer_end_token,\n data['trn_ans_exists'],\n data['trn_context_text'],\n data['trn_context_spans']\n ))\n train_y = train_orig['answer'].tolist()[:len(m1)]\n train_y = 
filter_list(train_y, m1)\n train_y = [[y] for y in train_y]\n dev = list(zip(\n data['dev_context_ids'],\n data['dev_context_features'],\n data['dev_context_tags'],\n data['dev_context_ents'],\n data['dev_question_ids'],\n data['dev_ans_exists'],\n data['dev_context_text'],\n data['dev_context_spans']\n ))\n if not 'data2' in args.data_file:\n dev_orig = pd.read_csv(os.path.join(squad_dir, 'dev.csv'))\n dev_y = dev_orig['answers'].tolist()[:len(m2)]\n dev_y = [eval(y) for y in dev_y]\n else:\n dev_y = data['dev_answers']\n dev_y = filter_list(dev_y, m2)\n\n print(\"original size = %d, filtered size = %d, %d\"%(len(m1), len(answer_end_token), sum(m1)))\n print(\"original size = %d, filtered size = %d, %d\"%(len(m2), len(dev_y), sum(m2)))\n\n return train, dev, dev_y, train_y, embedding, opt, q_l, ql_mask\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.zeros",
"torch.eq",
"torch.FloatTensor",
"torch.cuda.is_available",
"numpy.array"
]
] |
emiliom/echopype
|
[
"3665faeb251d4db4bd8e1dfe05f41942c86f10ea"
] |
[
"echopype/convert/utils/set_groups_base.py"
] |
[
"from __future__ import absolute_import, division, print_function\nimport os\nimport numpy as np\nimport netCDF4\nimport xarray as xr\n\n\nclass SetGroupsBase:\n \"\"\"Base class for setting groups in netCDF file.\n \"\"\"\n\n def __init__(self, file_path='test.nc'):\n self.file_path = file_path\n\n def set_toplevel(self, tl_dict):\n \"\"\"Set attributes in the Top-level group.\"\"\"\n with netCDF4.Dataset(self.file_path, \"w\", format=\"NETCDF4\") as ncfile:\n [ncfile.setncattr(k, v) for k, v in tl_dict.items()]\n\n def set_provenance(self, src_file_names, prov_dict):\n \"\"\"Set the Provenance group in the nc file.\n\n Parameters\n ----------\n src_file_names\n source filenames\n prov_dict\n dictionary containing file conversion parameters\n prov_dict['conversion_software_name']\n prov_dict['conversion_software_version']\n prov_dict['conversion_time']\n \"\"\"\n # create group\n nc_file = netCDF4.Dataset(self.file_path, \"a\", format=\"NETCDF4\")\n pr = nc_file.createGroup(\"Provenance\")\n\n # dimensions\n pr.createDimension(\"filenames\", None)\n\n # variables\n pr_src_fnames = pr.createVariable(src_file_names, str, \"filenames\")\n pr_src_fnames.long_name = \"Source filenames\"\n\n # set group attributes\n for k, v in prov_dict.items():\n pr.setncattr(k, v)\n\n # close nc file\n nc_file.close()\n\n def set_sonar(self, sonar_dict):\n \"\"\"Set the Sonar group in the nc file.\n\n Parameters\n ----------\n sonar_dict\n dictionary containing sonar parameters\n \"\"\"\n # create group\n ncfile = netCDF4.Dataset(self.file_path, \"a\", format=\"NETCDF4\")\n snr = ncfile.createGroup(\"Sonar\")\n\n # set group attributes\n for k, v in sonar_dict.items():\n snr.setncattr(k, v)\n\n # close nc file\n ncfile.close()\n\n def set_nmea(self, nmea_dict):\n \"\"\"Set the Platform/NMEA group in the nc file.\n\n Parameters\n ----------\n nmea_dict\n dictionary containing platform parameters\n \"\"\"\n # Only save platform group if file_path exists\n if not os.path.exists(self.file_path):\n print('netCDF file does not exist, exiting without saving Platform group...')\n else:\n # Convert np.datetime64 numbers to seconds since 1900-01-01\n # due to xarray.to_netcdf() error on encoding np.datetime64 objects directly\n time = (nmea_dict['nmea_time'] - np.datetime64('1900-01-01T00:00:00')) \\\n / np.timedelta64(1, 's')\n ds = xr.Dataset(\n {'NMEA_datagram': (['time'], nmea_dict['nmea_datagram'],\n {'long_name': 'NMEA datagram'})\n },\n coords={'time': (['time'], time,\n {'axis': 'T',\n 'calendar': 'gregorian',\n 'long_name': 'Timestamps for NMEA datagrams',\n 'standard_name': 'time',\n 'units': 'seconds since 1900-01-01'})},\n attrs={'description': 'All NMEA sensor datagrams'})\n # save to file\n ds.to_netcdf(path=self.file_path, mode=\"a\", group=\"Platform/NMEA\")\n"
] |
[
[
"numpy.timedelta64",
"numpy.datetime64"
]
] |
yangdb/RD-IOD
|
[
"64beb2e1efe823185adc0feb338a900f1a7df7a7"
] |
[
"lib/datasets/coco_14_40_base.py"
] |
[
"from __future__ import print_function\nfrom __future__ import absolute_import\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport xml.dom.minidom as minidom\n\nimport os\n# import PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\nimport pickle\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom . import ds_utils\nfrom .voc_eval import voc_eval\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom model.utils.config import cfg\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n# <<<< obsolete\n\n\nclass coco_14_40_base(imdb):\n def __init__(self, image_set, year, devkit_path=None):\n imdb.__init__(self, 'voc_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path)\n self._classes = ('__background__', # always index 0\n 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', \\\n 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', \\\n 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', \\\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', \\\n 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', \\\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',\\\n 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', \\\n 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table',\\\n 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', \\\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', \\\n 'teddy bear', 'hair drier', 'toothbrush')\n self._classes=self._classes[:41]\n self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n # self._roidb_handler = self.selective_search_roidb\n self._roidb_handler = self.gt_roidb\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False,\n 'rpn_file': None,\n 'min_size': 2}\n\n assert os.path.exists(self._devkit_path), \\\n 'VOCdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_id_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return i\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: 
{}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n '''\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n '''\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n '''\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n '''\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = pickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(cfg.DATA_DIR,\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in xrange(raw_data.shape[0]):\n boxes = raw_data[i][:, (1, 0, 3, 2)] - 1\n keep = ds_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])\n boxes = boxes[keep, :]\n 
box_list.append(boxes)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n # if not self.config['use_diff']:\n # # Exclude the samples labeled as difficult\n # non_diff_objs = [\n # obj for obj in objs if int(obj.find('difficult').text) == 0]\n # # if len(non_diff_objs) != len(objs):\n # # print 'Removed {} difficult objects'.format(\n # # len(objs) - len(non_diff_objs))\n # objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n ishards = np.zeros((num_objs), dtype=np.int32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n\n diffc = obj.find('difficult')\n difficult = 0 if diffc == None else int(diffc.text)\n ishards[ix] = difficult\n\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_ishard': ishards,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in xrange(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(self._year) < 2010 else False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n rec, prec, ap = voc_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.4f}'.format(ap))\n print('{:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n # Fixed: the original instantiated the undefined name `pascal_voc_07_15`;\n # the class defined in this module is `coco_14_40_base`.\n d = coco_14_40_base('trainval', '2007')\n res = d.roidb\n from IPython import embed;\n\n embed()\n"
] |
[
[
"scipy.io.loadmat",
"numpy.zeros",
"numpy.mean"
]
] |