repo_name: string (lengths 6–130)
hexsha: list
file_path: list
code: list
apis: list
krematas/scanner
[ "1fc94a908e1059b60d0f10872324f183d3f00514" ]
[ "examples/apps/soccer/depth/estimate_depth.py" ]
[ "import scannerpy\nimport numpy as np\nimport os\nfrom scannerpy import Database, DeviceType, Job, ColumnType, FrameType\n\nfrom os.path import join\nimport glob\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\n\nfrom hourglass import hg8\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imresize\n\nimport argparse\nimport time\nimport subprocess as sp\n\n# Testing settings\nparser = argparse.ArgumentParser(description='Depth estimation using Stacked Hourglass')\nparser.add_argument('--path_to_data', default='/home/krematas/Mountpoints/grail/data/barcelona/')\nparser.add_argument('--path_to_model', default='/home/krematas/Mountpoints/grail/tmp/cnn/model.pth')\nparser.add_argument('--visualize', action='store_true')\nparser.add_argument('--cloud', action='store_true')\nparser.add_argument('--bucket', default='', type=str)\n\nopt, _ = parser.parse_known_args()\n\n\[email protected]_python_op(device_type=DeviceType.GPU)\nclass MyDepthEstimationClass(scannerpy.Kernel):\n def __init__(self, config):\n if opt.cloud:\n checkpoint = torch.load('model.pth')\n else:\n checkpoint = torch.load( config.args['model_path'])\n netG_state_dict = checkpoint['state_dict']\n netG = hg8(input_nc=4, output_nc=51)\n netG.load_state_dict(netG_state_dict)\n netG.cuda()\n\n self.logsoftmax = nn.LogSoftmax()\n self.normalize = transforms.Normalize(mean=[0.3402085, 0.42575407, 0.23771574],\n std=[0.1159472, 0.10461029, 0.13433486])\n\n self.img_size = config.args['img_size']\n self.net = netG\n\n def execute(self, image: FrameType, mask: FrameType) -> FrameType:\n\n # Rescale\n image = imresize(image, (self.img_size, self.img_size))\n mask = imresize(mask[:, :, 0], (self.img_size, self.img_size), interp='nearest', mode='F')\n\n # ToTensor\n image = image.transpose((2, 0, 1))/255.0\n mask = mask[:, :, None].transpose((2, 0, 1))/255.0\n\n image_tensor = torch.from_numpy(image)\n image_tensor = torch.FloatTensor(image_tensor.size()).copy_(image_tensor)\n mask_tensor = torch.from_numpy(mask)\n\n # Normalize\n image_tensor = self.normalize(image_tensor)\n\n # Make it BxCxHxW\n image_tensor = image_tensor.unsqueeze(0)\n mask_tensor = mask_tensor.unsqueeze(0)\n\n # Concat input and mask\n image_tensor = torch.cat((image_tensor.float(), mask_tensor.float()), 1)\n image_tensor = image_tensor.cuda()\n\n output = self.net(image_tensor)\n final_prediction = self.logsoftmax(output[-1])\n\n np_prediction = final_prediction.cpu().detach().numpy()\n np_prediction = np_prediction[0, :, :, :]\n\n return np_prediction.astype(np.float32)\n\ndataset = opt.path_to_data\n\nif opt.cloud:\n def get_paths(path):\n paths = sp.check_output('gsutil ls gs://{:s}/{:s}'.format(opt.bucket, path),\n shell=True).strip().decode('utf-8')\n paths = paths.split('\\n')\n prefix_len = len('gs://{:s}'.format(opt.bucket))\n stripped_paths = [p[prefix_len:] for p in paths]\n return stripped_paths\n image_files = get_paths(join(dataset, 'players', 'images', '*.jpg'))\n mask_files = get_paths(join(dataset, 'players', 'masks', '*.png'))\nelse:\n image_files = glob.glob(join(dataset, 'players', 'images', '*.jpg'))\n mask_files = glob.glob(join(dataset, 'players', 'masks', '*.png'))\n\nimage_files.sort()\nmask_files.sort()\n\nmodel_path = opt.path_to_model\n\nif opt.cloud:\n print('Finding master IP...')\n ip = sp.check_output(\n '''\n kubectl get pods -l 'app=scanner-master' -o json | \\\n jq '.items[0].spec.nodeName' -r | \\\n xargs -I {} kubectl get nodes/{} -o json | \\\n jq '.status.addresses[] | select(.type == 
\"ExternalIP\") | .address' -r\n ''',\n shell=True).strip().decode('utf-8')\n \n port = sp.check_output(\n '''\n kubectl get svc/scanner-master -o json | \\\n jq '.spec.ports[0].nodePort' -r\n ''',\n shell=True).strip().decode('utf-8')\n\n master = '{}:{}'.format(ip, port)\n print(master)\n db = Database(master=master, start_cluster=False, config_path='./config.toml',\n grpc_timeout=60)\nelse:\n db = Database()\n \n\n\nconfig = db.config.config['storage']\nparams = {'bucket': opt.bucket,\n 'storage_type': config['type'],\n 'endpoint': 'storage.googleapis.com',\n 'region': 'US'}\n\nencoded_image = db.sources.Files(**params)\nframe = db.ops.ImageDecoder(img=encoded_image)\n\nencoded_mask = db.sources.Files(**params)\nmask_frame = db.ops.ImageDecoder(img=encoded_mask)\n\n\nmy_depth_estimation_class = db.ops.MyDepthEstimationClass(image=frame, mask=mask_frame,\n img_size=256, model_path=model_path,\n device=DeviceType.GPU)\noutput_op = db.sinks.FrameColumn(columns={'frame': my_depth_estimation_class})\n\njob = Job(\n op_args={\n encoded_image: {'paths': image_files, **params},\n encoded_mask: {'paths': mask_files, **params},\n output_op: 'example_resized5',\n })\n\nstart = time.time()\n[out_table] = db.run(output_op, [job], force=True, work_packet_size=8, io_packet_size=16)\nend = time.time()\n\nprint('Total time for depth estimation in scanner: {0:.3f} sec'.format(end-start))\n\n\nresults = out_table.column('frame').load()\n\npath_to_save = join(dataset, 'players', 'prediction_scanner')\nif not os.path.exists(path_to_save):\n os.mkdir(path_to_save)\n\nfor i, res in enumerate(results):\n pred_scanner = np.argmax(res, axis=0)\n np.save(join(path_to_save, '{0:05d}.npy'.format(i)), res)\n\n # if opt.visualize:\n # # Visualization\n # pred = np.load(pred_files[i])[0, :, :, :]\n # pred = np.argmax(pred, axis=0)\n # fig, ax = plt.subplots(1, 2)\n #\n # ax[1].imshow(pred)\n # ax[0].imshow(pred_scanner)\n # plt.show()\n" ]
[ [ "torch.nn.LogSoftmax", "scipy.misc.imresize", "torch.from_numpy", "numpy.argmax", "torch.load" ] ]
qq1418381215/caat
[ "1422707bef7a2aeca272fa085f410bff07ced760" ]
[ "warp_transducer/pytorch_binding/tune_rnnt.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom warprnnt_pytorch import DelayTLoss, RNNTLoss\nimport time\ndevice=0\nimport os,psutil\n\nprocess = psutil.Process(os.getpid())\n\n\ndef main(src_len=1000, tgt_len=100, voc=30000, bsz=1):\n \n cuda= True\n rnnt_loss=DelayTLoss(blank=0, delay_scale=1., reduction='mean')\n rnnt2= RNNTLoss()\n torch.manual_seed(20)\n label= (torch.rand(bsz,tgt_len)*voc).int().cuda(device) \n acts= torch.randn(bsz,src_len,tgt_len+1, voc).cuda(device)\n label= label.clamp(0, voc-1)\n acts.requires_grad=True\n acts2= acts.detach()\n acts2.requires_grad= True\n #print(f\"acts cost {acts.numel()*4} bytes\")\n #print(torch.cuda.memory_summary(device=device))\n #print('Used Memory:',process.memory_info().rss / 1024 / 1024,'MB')\n start= time.time()\n #acts=F.log_softmax(acts, dim=-1)\n act_lengths= label.new(bsz).fill_(src_len)\n label_lengths= label.new(bsz).fill_(tgt_len)\n \"\"\" loss= rnnt2(acts, label,act_lengths, label_lengths)\n loss= loss*0.1\n loss.backward()\n print(\"loss={loss.item()}\") \"\"\"\n \n loss, loss_rnnt, loss_delay= rnnt_loss(acts2, label, act_lengths, label_lengths)\n print(f\"loss={loss.item()}, loss_rnnt={loss_rnnt.item()}, loss_delay={loss_delay.item()}\") \n print(torch.cuda.memory_summary(device=device))\n loss= loss*0.1\n loss.backward()\n print(torch.cuda.memory_summary(device=device))\n #diff= acts.grad - acts2.grad\n \n #import pdb;pdb.set_trace()\n \n grad= acts2.grad\n isbad= torch.abs(grad)>10\n print(f\" max={grad.max().item()}, min={grad.min().item()}, bad={isbad.sum().item()}\")\n end=time.time()\n print(f\"src_len={src_len}, tgt_len={tgt_len},voc={voc},bsz={bsz}, cost={end-start} secs\")\n\nif __name__ == \"__main__\":\n # (32, 70, 30000,1) 780M,0.027s\n #(32, 70, 30000,10) 7.8G 0.218s\n #(32, 70, 30000,5) 3.9G 0.093s\n #(32, 70, 30000,5)cpu segment fault\n #cli 5 80 70 30000 6.6G, 0.33s\n #cli 10,32,70,30000 5.3G 0.276s\n #cli 5 80 70 30000 cpu 5.9g mem, 2.38s\n #python 5 80 70 30000 cpu 13g mem, 3.17s, 去除logsoftmax 9g\n #(32, 70, 30000,5) 0.42s, 2.6G\n #(20, 70, 30000, 10) 0.07s, 3.3G\n #(20, 70, 30000, 10) release 0.087s, 3.3G\n # 看上去做文本任务必需拆输出,隐层不需要拆借,softmax投影需要拆\n #(20,70,30000,30)12g不足\n # should compact with split trick\n #[2, 32, 117, 256], slen [27,27], tlen [105,91]\n #[4, 25, 83, 256]\n main(40, 100, 3000,20) \n" ]
[ [ "torch.rand", "torch.cuda.memory_summary", "torch.manual_seed", "torch.abs", "torch.randn" ] ]
zkzcx/RFFnet
[ "8981a22956b37405c3bb5e465f1d99fbe92ab536" ]
[ "train_test_vgg.py" ]
[ "from __future__ import print_function\n\nimport argparse\nimport pickle\nimport time\n\nimport numpy as np\nimport os\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torch.autograd import Variable\n\nfrom data import VOCroot, COCOroot, VOC_300, VOC_512, COCO_300, COCO_512, COCO_mobile_300, AnnotationTransform, \\\n COCODetection, VOCDetection, detection_collate, BaseTransform, preproc\nfrom layers.functions import Detect, PriorBox\nfrom layers.modules import MultiBoxLoss\nfrom utils.nms_wrapper import nms\nfrom utils.timer import Timer\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Receptive Field Block Net Training')\nparser.add_argument('-v', '--version', default='FSSD_vgg',\n help='FSSD_vgg, RFB_vgg ,RFB_E_vgg SSD_vgg version.')\nparser.add_argument('-s', '--size', default='512',\n help='300 or 512 input size.')\nparser.add_argument('-d', '--dataset', default='VOC',\n help='VOC or COCO dataset')\nparser.add_argument(\n '--basenet', default='weights/vgg16_reducedfc.pth', help='pretrained base model')\nparser.add_argument('--jaccard_threshold', default=0.5,\n type=float, help='Min Jaccard index for matching')\nparser.add_argument('-b', '--batch_size', default=16,\n type=int, help='Batch size for training')\nparser.add_argument('--num_workers', default=4,\n type=int, help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True,\n type=bool, help='Use cuda to train model')\nparser.add_argument('--ngpu', default=1, type=int, help='gpus')\nparser.add_argument('--lr', '--learning-rate',\n default=0.001, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\t\n\nparser.add_argument('--resume_net', default='weight/FSSD_vgg5120/FSSD_vgg_VOC_epoches_110.pth', help='resume net for retraining')\n#parser.add_argument('--resume_net', default=None, help='resume net for retraining')\nparser.add_argument('--resume_epoch', default=110,\n type=int, help='resume iter for retraining')\n\nparser.add_argument('-max', '--max_epoch', default=200,\n type=int, help='max epoch for retraining')\nparser.add_argument('--weight_decay', default=1e-5,\n type=float, help='Weight decay for SGD')\nparser.add_argument('-we', '--warm_epoch', default=6,\n type=int, help='max epoch for retraining')\nparser.add_argument('--gamma', default=0.1,\n type=float, help='Gamma update for SGD')\nparser.add_argument('--log_iters', default=True,\n type=bool, help='Print the loss at each iteration')\nparser.add_argument('--save_folder', default='weight/',\n help='Location to save checkpoint models')\nparser.add_argument('--date', default='0304')\nparser.add_argument('--save_frequency', default=5)\nparser.add_argument('--retest', default=False, type=bool,\n help='test cache results')\nparser.add_argument('--test_frequency', default=5)\nparser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')\nparser.add_argument('--send_images_to_visdom', type=str2bool, default=False,\n help='Sample a random image from each 10th batch, send it to visdom after augmentations step')\nargs = parser.parse_args()\n\nsave_folder = os.path.join(args.save_folder, args.version + '5120')\nif not os.path.exists(save_folder):\n os.makedirs(save_folder)\ntest_save_dir = os.path.join(save_folder, 'ss_predict')\nif not 
os.path.exists(test_save_dir):\n os.makedirs(test_save_dir)\n\nlog_file_path = save_folder + '/train' + time.strftime('_%Y-%m-%d-%H-%M', time.localtime(time.time())) + '.log'\nif args.dataset == 'VOC':\n train_sets = [('2007', 'trainval'), ('2012', 'trainval')]\n cfg = (VOC_300, VOC_512)[args.size == '512']\nelse:\n train_sets = [('2017', 'train')]\n cfg = (COCO_300, COCO_512)[args.size == '512']\n\nif args.version == 'RFB_vgg':\n from models.RFB_Net_vgg import build_net\nelif args.version == 'RFB_E_vgg':\n from models.RFB_Net_E_vgg import build_net\nelif args.version == 'FSSD_vgg':\n if args.size == '300':\n from models.FSSD_vgg_FPN300 import build_net\n elif args.size == '512':\n from models.FSSD_vgg_FPN512 import build_net\n else:\n print('Unkown version!')\nelse:\n print('Unkown version!')\nrgb_std = (1, 1, 1)\nimg_dim = (300, 512)[args.size == '512']\nif 'FSSD_vgg' in args.version:\n rgb_means = (104, 117, 123)\nelif 'FSSD_mobile' in args.version:\n rgb_means = (103.94, 116.78, 123.68)\n\np = (0.6, 0.2)[args.version == 'RFB_mobile']\nnum_classes = (21, 81)[args.dataset == 'COCO']\nbatch_size = args.batch_size\nweight_decay = 0.0005\ngamma = 0.1\nmomentum = 0.9\nif args.visdom:\n import visdom\n\n viz = visdom.Visdom()\n\nnet = build_net(img_dim, num_classes)\nprint(net)\nif not args.resume_net:\n base_weights = torch.load(args.basenet)\n print('Loading base network...')\n net.base.load_state_dict(base_weights)\n\n\n def xavier(param):\n init.xavier_uniform(param)\n\n\n def weights_init(m):\n for key in m.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal_(m.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n m.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n m.state_dict()[key][...] = 0\n\n\n print('Initializing weights...')\n # initialize newly added layers' weights with kaiming_normal method\n net.extras.apply(weights_init)\n net.loc.apply(weights_init)\n net.conf.apply(weights_init)\n if args.version == 'FSSD_vgg' or args.version == 'FRFBSSD_vgg':\n net.ft_module.apply(weights_init)\n net.pyramid_ext.apply(weights_init)\n if 'RFB' in args.version:\n net.Norm.apply(weights_init)\n if args.version == 'RFB_E_vgg':\n net.reduce.apply(weights_init)\n net.up_reduce.apply(weights_init)\n\nelse:\n # load resume network\n resume_net_path = os.path.join(save_folder, args.version + '_' + args.dataset + '_epoches_' + \\\n str(args.resume_epoch) + '.pth')\n print('Loading resume network', resume_net_path)\n state_dict = torch.load(resume_net_path)\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n head = k[:7]\n if head == 'module.':\n name = k[7:] # remove `module.`\n else:\n name = k\n new_state_dict[name] = v\n net.load_state_dict(new_state_dict)\n\nif args.ngpu > 1:\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n\nif args.cuda:\n net.cuda()\n cudnn.benchmark = True\n\ndetector = Detect(num_classes, 0, cfg)\noptimizer = optim.SGD(net.parameters(), lr=args.lr,\n momentum=args.momentum, weight_decay=args.weight_decay)\n# optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,\n# momentum=args.momentum, weight_decay=args.weight_decay)\n\ncriterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)\npriorbox = PriorBox(cfg)\nwith torch.no_grad():\n priors = Variable(priorbox.forward())\n# dataset\nprint('Loading Dataset...')\nif args.dataset == 'VOC':\n testset 
= VOCDetection(\n VOCroot, [('2007', 'test')], None, AnnotationTransform())\n train_dataset = VOCDetection(VOCroot, train_sets, preproc(\n img_dim, rgb_means, rgb_std, p), AnnotationTransform())\nelif args.dataset == 'COCO':\n testset = COCODetection(\n COCOroot, [('2017', 'val')], None)\n train_dataset = COCODetection(COCOroot, train_sets, preproc(\n img_dim, rgb_means, rgb_std, p))\nelse:\n print('Only VOC and COCO are supported now!')\n exit()\n\n\ndef mixup_data(x, y, alpha=0, use_cuda=True):\n '''compute the mixup data, Return mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n print(\"-\"*30)\n print(x.size()[0])\n batch_size = x.size()[0]\n\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index,:]\n\n y_a, y_b = y, [y[i] for i in index]\n return mixed_x, y_a, y_b, lam\n\n\ndef train():\n net.train()\n # loss counters\n epoch = 0\n if args.resume_net:\n epoch = 0 + args.resume_epoch\n epoch_size = len(train_dataset) // args.batch_size\n max_iter = args.max_epoch * epoch_size\n\n stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)\n stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)\n stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']\n print('Training', args.version, 'on', train_dataset.name)\n print(' Total params: %.2fM' % (sum(p.numel() for p in net.parameters()) / 1000000.0))\n step_index = 0\n\n if args.visdom:\n # initialize visdom loss plot\n lot = viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title='Current SSD Training Loss',\n legend=['Loc Loss', 'Conf Loss', 'Loss']\n )\n )\n epoch_lot = viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel='Epoch',\n ylabel='Loss',\n title='Epoch SSD Training Loss',\n legend=['Loc Loss', 'Conf Loss', 'Loss']\n )\n )\n if args.resume_epoch > 0:\n start_iter = args.resume_epoch * epoch_size\n else:\n start_iter = 0\n\n log_file = open(log_file_path, 'w')\n batch_iterator = None\n mean_loss_c = 0\n mean_loss_l = 0\n for iteration in range(start_iter, max_iter + 10):\n if (iteration % epoch_size == 0):\n # create batch iterator\n batch_iterator = iter(data.DataLoader(train_dataset, batch_size,\n shuffle=True, num_workers=args.num_workers,\n collate_fn=detection_collate))\n loc_loss = 0\n conf_loss = 0\n if epoch % args.save_frequency == 0 and epoch > 0:\n torch.save(net.state_dict(), os.path.join(save_folder, args.version + '_' + args.dataset + '_epoches_' +\n repr(epoch) + '.pth'))\n if epoch % args.test_frequency == 0 and epoch > 0:\n net.eval()\n top_k = (300, 200)[args.dataset == 'COCO']\n if args.dataset == 'VOC':\n APs, mAP = test_net(test_save_dir, net, detector, args.cuda, testset,\n BaseTransform(net.size, rgb_means, rgb_std, (2, 0, 1)),\n top_k, thresh=0.01)\n APs = [str(num) for num in APs]\n mAP = str(mAP)\n log_file.write(str(iteration) + ' APs:\\n' + '\\n'.join(APs))\n log_file.write('mAP:\\n' + mAP + '\\n')\n else:\n test_net(test_save_dir, net, detector, args.cuda, testset,\n BaseTransform(net.size, rgb_means, rgb_std, (2, 0, 1)),\n top_k, thresh=0.01)\n\n net.train()\n epoch += 1\n\n load_t0 = time.time()\n if iteration in stepvalues:\n step_index = stepvalues.index(iteration) + 1\n if args.visdom:\n viz.line(\n X=torch.ones((1, 3)).cpu() * epoch,\n Y=torch.Tensor([mean_loss_l, mean_loss_c,\n 
mean_loss_l + mean_loss_c]).unsqueeze(0).cpu() / epoch_size,\n win=epoch_lot,\n update='append'\n )\n lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)\n\n # load train data\n images, targets = next(batch_iterator)\n\n # print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))\n\n # if args.cuda:\n # images = Variable(images.cuda())\n # targets = [Variable(anno.cuda(), volatile=True) for anno in targets]\n # else:\n # images = Variable(images)\n # targets = [Variable(anno, volatile=True) for anno in targets]\n # forward\n\n images, targets_a, targets_b, lam = mixup_data(images, targets, alpha=0)\n images = Variable(images.cuda())\n targets_a, targets_b = [Variable(anno_a.cuda()) for anno_a in targets_a], [Variable(anno_b.cuda()) for anno_b in targets_b]\n\n\n out = net(images)\n # backprop\n optimizer.zero_grad()\n\n loss_la, loss_ca = criterion(out, priors, targets_a)\n loss_lb, loss_cb = criterion(out, priors, targets_b)\n loss_l = lam * loss_la + (1 - lam) * loss_lb\n loss_c = lam * loss_ca + (1 - lam) * loss_cb\n\n # arm branch loss\n #loss_l, loss_c = criterion(out, priors, targets)\n # odm branch loss\n\n mean_loss_c += loss_c.item()\n mean_loss_l += loss_l.item()\n\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n load_t1 = time.time()\n if iteration % 10 == 0:\n print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)\n + '|| Totel iter ' +\n repr(iteration) + ' || L: %.4f C: %.4f||' % (\n mean_loss_l / 10, mean_loss_c / 10) +\n 'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))\n log_file.write(\n 'Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)\n + '|| Totel iter ' +\n repr(iteration) + ' || L: %.4f C: %.4f||' % (\n mean_loss_l / 10, mean_loss_c / 10) +\n 'Batch time: %.4f sec. 
||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr) + '\\n')\n\n mean_loss_c = 0\n mean_loss_l = 0\n if args.visdom and args.send_images_to_visdom:\n random_batch_index = np.random.randint(images.size(0))\n viz.image(images.data[random_batch_index].cpu().numpy())\n log_file.close()\n torch.save(net.state_dict(), os.path.join(save_folder,\n 'Final_' + args.version + '_' + args.dataset + '.pth'))\n\n\nimport math\ndef adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n \"\"\"Sets the learning rate\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n if epoch < args.warm_epoch:\n lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * args.warm_epoch)\n else:\n lr = 1e-6 + (args.lr-1e-6) * 0.5 * (1 + math.cos((iteration-6*epoch_size)*math.pi/((args.max_epoch-6)*epoch_size)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr\n\n'''\nimport math\ndef adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n \"\"\"Sets the learning rate\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n if epoch < 6:\n lr = 1e-6 + (args.lr-1e-6) * iteration / (epoch_size * 6)\n else:\n lr = 1e-6 + (args.lr-1e-6) * 0.5 * (1 + math.cos((iteration-6*epoch_size)*math.pi/((args.max_epoch-6)*epoch_size)))\n #lr = args.lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n'''\n\ndef test_net(save_folder, net, detector, cuda, testset, transform, max_per_image=300, thresh=0.005):\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n # dump predictions and assoc. ground truth to text file for now\n num_images = len(testset)\n num_classes = (21, 81)[args.dataset == 'COCO']\n all_boxes = [[[] for _ in range(num_images)]\n for _ in range(num_classes)]\n\n _t = {'im_detect': Timer(), 'misc': Timer()}\n det_file = os.path.join(save_folder, 'detections.pkl')\n\n if args.retest:\n f = open(det_file, 'rb')\n all_boxes = pickle.load(f)\n print('Evaluating detections')\n testset.evaluate_detections(all_boxes, save_folder)\n return\n\n for i in range(num_images):\n img = testset.pull_image(i)\n x = Variable(transform(img).unsqueeze(0), volatile=True)\n if cuda:\n x = x.cuda()\n\n _t['im_detect'].tic()\n out = net(x=x, test=True) # forward pass\n boxes, scores = detector.forward(out, priors)\n detect_time = _t['im_detect'].toc()\n boxes = boxes[0]\n scores = scores[0]\n\n boxes = boxes.cpu().numpy()\n scores = scores.cpu().numpy()\n # scale each detection back up to the image\n scale = torch.Tensor([img.shape[1], img.shape[0],\n img.shape[1], img.shape[0]]).cpu().numpy()\n boxes *= scale\n\n _t['misc'].tic()\n\n for j in range(1, num_classes):\n inds = np.where(scores[:, j] > thresh)[0]\n if len(inds) == 0:\n all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)\n continue\n c_bboxes = boxes[inds]\n c_scores = scores[inds, j]\n c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(\n np.float32, copy=False)\n if args.dataset == 'VOC':\n cpu = False\n else:\n cpu = False\n\n keep = nms(c_dets, 0.45, force_cpu=cpu)\n keep = keep[:50]\n c_dets = c_dets[keep, :]\n all_boxes[j][i] = c_dets\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in range(1, num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= 
image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n nms_time = _t['misc'].toc()\n\n if i % 20 == 0:\n print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'\n .format(i + 1, num_images, detect_time, nms_time))\n _t['im_detect'].clear()\n _t['misc'].clear()\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n print('Evaluating detections')\n if args.dataset == 'VOC':\n APs, mAP = testset.evaluate_detections(all_boxes, save_folder)\n return APs, mAP\n else:\n testset.evaluate_detections(all_boxes, save_folder)\n\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "torch.zeros", "numpy.empty", "torch.nn.init.xavier_uniform", "torch.no_grad", "torch.randperm", "torch.ones", "numpy.where", "numpy.random.beta", "torch.utils.data.DataLoader", "torch.load", "numpy.sort", "numpy.hstack", "torch.Tensor" ] ]
Skeftical/modelbasedaqp
[ "1dbba997c16493e0344aaa95901f3de5e670c352", "5f49653e48fc5a4ec1f1eb44e2a96787876681f4" ]
[ "code/Performance_Storage/verdict-perf-build-samples.py", "sql_parser/parser.py" ]
[ "import pyverdict\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\nimport pandas as pd\nimport pickle\nimport re\nos.chdir('../../')\n#print(os.listdir('.'))\nsys.path.append('.')\n\nif not os.path.exists('output/performance'):\n print('creating ', 'performance')\n os.makedirs('output/performance')\nif not os.path.exists('output/performance/csvs/verdict'):\n print('creating ', 'performance csvs')\n os.makedirs('output/performance/csvs/verdict')\n\nif __name__=='__main__':\n print(\"main executing\")\n\n verdict = pyverdict.postgres('127.0.0.1',5433,dbname='tpch1g',user='analyst',password='analyst')\n res = verdict.sql(\"SHOW SCRAMBLES\")\n print(res)\n verdict.sql(\"DROP ALL SCRAMBLE public.lineitem;\")\n verdict.sql(\"DROP ALL SCRAMBLE public.orders;\")\n verdict.sql(\"DROP ALL SCRAMBLE public.partsupp;\")\n\n result = {}\n result['sample_size'] = []\n result['time'] = []\n ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n start = time.time()\n for ratio in ratios:\n verdict.sql(\"\"\"CREATE SCRAMBLE IF NOT EXISTS public.lineitem_x\n FROM public.lineitem SIZE {}\"\"\".format(ratio))\n verdict.sql(\"\"\"CREATE SCRAMBLE IF NOT EXISTS public.orders_x\n FROM public.orders SIZE {}\"\"\".format(ratio))\n verdict.sql(\"\"\"CREATE SCRAMBLE IF NOT EXISTS public.partsupp_x\n FROM public.partsupp SIZE {}\"\"\".format(ratio))\n end = time.time()-start\n result['sample_size'].append(ratio)\n result['time'].append(end)\n verdict.sql(\"\"\"DROP SCRAMBLE public.lineitem_x\n ON public.lineitem \"\"\")\n verdict.sql(\"\"\"DROP SCRAMBLE public.orders_x\n ON public.orders\"\"\")\n verdict.sql(\"\"\"DROP SCRAMBLE public.partsupp_x\n ON public.partsupp\"\"\")\n\n result = pd.DataFrame(result)\n result.to_csv('output/performance/csvs/verdict/verdict-sample-building-ratio.csv')\n\n# print(res)\n", "import sqlparse\nfrom sqlparse.sql import Where, TokenList, Function\nfrom sqlparse.tokens import Name, Keyword, DML, Wildcard, Comparison\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nimport pandas as pd\nsql = \"\"\"select avg(add_to_car_order), count(*) tags, sum(cart)\nfrom order_products\nwhere add_to_cart_order <= 2 OR add_to_cart_order>0 AND packet=4.555\ngroup by reordered;\"\"\"\n\nsql2 = \"\"\"\nSELECT product_name, count(*) as order_count\nFROM order_products, orders, products\nWHERE orders.order_id = order_products.order_id\n AND order_products.product_id = products.product_id\n AND (order_dow = 0 OR order_dow = 1)\nGROUP BY product_name\nORDER BY order_count DESC\nLIMIT 5;\n\"\"\"\nclass Parser():\n\n def parse(self,sql_stmt):\n self.parsed = sqlparse.parse(sql_stmt)\n for t in self.parsed[0]:\n if isinstance(t,Where):\n self.__vectorize(t)\n if t.ttype is DML and t.value.lower()=='select':\n self.__projections(t,self.parsed[0])\n if t.ttype is Keyword and t.value.lower()=='group by':\n self.__groupbyattrs(t, self.parsed[0])\n\n\n def __vectorize(self,tokenlist):\n token_list = TokenList(list(tokenlist.flatten()))\n # print(token_list.tokens)\n for x in token_list:\n if x.ttype is Comparison:\n idx_comp_op = token_list.token_index(x) #Index of comparison operator\n attr = token_list.token_prev(idx_comp_op,skip_ws=True, skip_cm=True)[1].value#Name of the attribute\n print(attr)\n comp_op = x\n # print(comp_op)\n if comp_op.value =='<' or comp_op.value=='<=':\n lit_dir = 'ub'\n elif comp_op.value == '>' or comp_op.value=='>=':\n lit_dir = 'lb'\n else:\n lit_dir = 'bi'\n # print(lit_dir)\n try :\n lit = float(token_list.token_next(idx_comp_op, skip_ws=True, 
skip_cm=True)[1].value) #literal value\n except ValueError:\n print(\"Possible join, skipping\")\n continue;\n # print(lit)\n if lit_dir=='bi':\n self.query_vec['_'.join([attr,'lb'])] = lit\n self.query_vec['_'.join([attr, 'ub'])] = lit\n continue;\n self.query_vec['_'.join([attr,lit_dir])] = lit #lit_dir is either lb or ub\n\n def __projections(self,token, tokenlist):\n idx = tokenlist.token_index(token)\n afs_list_idx, afs = tokenlist.token_next(idx, skip_ws=True, skip_cm=True)\n afs_list = TokenList(list(afs.flatten()))\n for af in afs_list: # Get AFs\n\n if af.value.lower() in ['avg','count','sum','min','max']:\n # if af not in self.afs_dic:\n # self.afs_dic[af.value] = []\n af_idx = afs_list.token_index(af)\n punc_idx, _ = afs_list.token_next(af_idx, skip_ws=True, skip_cm=True)\n attr_idx, attr = afs_list.token_next(punc_idx, skip_ws=True, skip_cm=True)\n if attr.ttype is not Wildcard:\n self.afs.append('_'.join([af.value, attr.value]))\n else:\n self.afs.append(af.value)\n\n def __groupbyattrs(self, token, tokenlist):\n g_index = tokenlist.token_index(token)\n attr_idx , attr = tokenlist.token_next(g_index)\n for g_attr in attr.flatten():\n if g_attr.ttype is Name:\n self.groupby_attrs.append(g_attr.value)\n\n def get_groupby_attrs(self):\n return self.groupby_attrs\n\n def get_projections(self):\n return self.afs\n\n def get_vector(self):\n return self.query_vec\n\n def __init__(self):\n self.query_vec = {} # {attr : {'lb' : val, 'ub': val}}\n self.afs = [] # {'af' : ['attr1','attr2']}\n self.groupby_attrs = []\n\n\nclass QueryVectorizer():\n\n def __trickle_down(self, length):\n\n for k in self.__internal_dict:\n k_length = len(self.__internal_dict[k])\n if k_length<length and k_length!=0:\n self.__internal_dict[k]+=[self.__internal_dict[k][-1]]*(length-k_length)\n\n def insert(self,key, value):\n \"\"\"\n Insert using key(=attribute) and value(s)\n ------------------------------------------\n key : str\n value : int,str,float,list\n \"\"\"\n #Whenever there is a list it must be a groupby attribute\n if isinstance(value, list):\n self.__internal_dict[key]+=value\n self.__trickle_down(len(self.__internal_dict[key]))\n else:\n self.__internal_dict[key].append(value)\n listlength = len(self.__internal_dict[key])\n self.__max_row_size = listlength if listlength > self.__max_row_size else self.__max_row_size\n\n def to_dense(self):\n if self.__sparse_matrix is not None:\n return self.__sparse_matrix.todense()\n else:\n self.to_matrix()\n return self.to_dense()\n\n def to_dataframe(self):\n if self.__sparse_matrix is not None:\n self.inverse_attr_str_mapper = dict([(value, key) for key, value in self.attr_str_mapper.items()])\n df = pd.DataFrame(self.to_dense(), columns=self.__column_names)\n for attr in self.attrs_with_str:\n df[attr] = df[attr].replace(self.inverse_attr_str_mapper)\n df = df.replace({0:np.nan, -10: 0})\n\n return df\n else:\n self.to_matrix()\n return self.to_dataframe()\n\n def to_matrix(self):\n row_ind = []\n col_ind = []\n data = []\n for i,attr in enumerate(self.__internal_dict):\n for j,val in enumerate(self.__internal_dict[attr]):\n col_ind.append(i)\n row_ind.append(j)\n if val==0:\n val = -10\n if isinstance(val, str):\n self.attrs_with_str.add(attr)\n val = self.attr_str_mapper.setdefault(val, len(self.attr_str_mapper))\n\n data.append(val)\n self.__sparse_matrix = csr_matrix((data, (row_ind, col_ind)),shape=(self.__max_row_size,self.__column_size))\n return self.__sparse_matrix\n\n def get_column_names(self):\n return self.__column_names\n\n def 
_get_internal_representation(self):\n return self.__internal_dict\n\n def __init__(self, attributes, SET_OWN=False):\n self.__internal_dict = {}\n self.attrs_with_str = set()\n self.attr_str_mapper = {}\n self.__max_row_size = 0\n self.__sparse_matrix = None\n if not SET_OWN:\n for k in attributes:\n self.__internal_dict['_'.join([k,'lb'])] = []\n self.__internal_dict['_'.join([k,'ub'])] = []\n else:\n for k in attributes:\n self.__internal_dict[k] = []\n self.__column_size = len(self.__internal_dict)\n self.__column_names = self.__internal_dict.keys()\n\n\nif __name__=='__main__':\n # parser = Parser()\n # parser.parse(sql)\n # print(parser.get_vector())\n # print(parser.get_projections())\n # print(parser.get_groupby_attrs())\n qv = QueryVectorizer(['a1','a2','a3'])\n qv.insert('a1_lb',10)\n qv.insert('a2_lb',['a','b','c'])\n qv.insert('a2_lb',['a','b','c'])\n qv.insert('a3_ub', [0,1])\n print(qv._get_internal_representation())\n print(qv.to_matrix())\n print(qv.to_dense())\n print(qv.to_dataframe())\n" ]
[ [ "pandas.DataFrame" ], [ "scipy.sparse.csr_matrix" ] ]
LiQiang0307/yolov4-pytorch
[ "06e5a1bdb8700b58820db9403b639b7311434fc9" ]
[ "train.py" ]
[ "#-------------------------------------#\r\n# 对数据集进行训练\r\n#-------------------------------------#\r\nimport os\r\nimport time\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\n\r\nfrom nets.yolo4 import YoloBody\r\nfrom nets.yolo_training import Generator, YOLOLoss\r\nfrom utils.dataloader import YoloDataset, yolo_dataset_collate\r\n\r\n\r\n#---------------------------------------------------#\r\n# 获得类和先验框\r\n#---------------------------------------------------#\r\ndef get_classes(classes_path):\r\n '''loads the classes'''\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\ndef get_anchors(anchors_path):\r\n '''loads the anchors from a file'''\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n return np.array(anchors).reshape([-1,3,2])[::-1,:,:]\r\n\r\ndef get_lr(optimizer):\r\n for param_group in optimizer.param_groups:\r\n return param_group['lr']\r\n\r\n \r\ndef fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):\r\n total_loss = 0\r\n val_loss = 0\r\n\r\n net.train()\r\n with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(gen):\r\n if iteration >= epoch_size:\r\n break\r\n images, targets = batch[0], batch[1]\r\n with torch.no_grad():\r\n if cuda:\r\n images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()\r\n targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]\r\n else:\r\n images = Variable(torch.from_numpy(images).type(torch.FloatTensor))\r\n targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]\r\n\r\n #----------------------#\r\n # 清零梯度\r\n #----------------------#\r\n optimizer.zero_grad()\r\n #----------------------#\r\n # 前向传播\r\n #----------------------#\r\n outputs = net(images)\r\n losses = []\r\n num_pos_all = 0\r\n #----------------------#\r\n # 计算损失\r\n #----------------------#\r\n for i in range(3):\r\n loss_item, num_pos = yolo_losses[i](outputs[i], targets)\r\n losses.append(loss_item)\r\n num_pos_all += num_pos\r\n\r\n loss = sum(losses) / num_pos_all\r\n #----------------------#\r\n # 反向传播\r\n #----------------------#\r\n loss.backward()\r\n optimizer.step()\r\n\r\n total_loss += loss.item()\r\n \r\n pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), \r\n 'lr' : get_lr(optimizer)})\r\n pbar.update(1)\r\n\r\n net.eval()\r\n print('Start Validation')\r\n with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(genval):\r\n if iteration >= epoch_size_val:\r\n break\r\n images_val, targets_val = batch[0], batch[1]\r\n\r\n with torch.no_grad():\r\n if cuda:\r\n images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()\r\n targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]\r\n else:\r\n images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))\r\n targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]\r\n optimizer.zero_grad()\r\n outputs = 
net(images_val)\r\n losses = []\r\n num_pos_all = 0\r\n for i in range(3):\r\n loss_item, num_pos = yolo_losses[i](outputs[i], targets_val)\r\n losses.append(loss_item)\r\n num_pos_all += num_pos\r\n loss = sum(losses) / num_pos_all\r\n val_loss += loss.item()\r\n pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})\r\n pbar.update(1)\r\n print('Finish Validation')\r\n print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))\r\n print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\r\n\r\n print('Saving state, iter:', str(epoch+1))\r\n torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\r\n\r\n#----------------------------------------------------#\r\n# 检测精度mAP和pr曲线计算参考视频\r\n# https://www.bilibili.com/video/BV1zE411u7Vw\r\n#----------------------------------------------------#\r\nif __name__ == \"__main__\":\r\n #-------------------------------#\r\n # 是否使用Cuda\r\n # 没有GPU可以设置成False\r\n #-------------------------------#\r\n Cuda = True\r\n #-------------------------------#\r\n # Dataloder的使用\r\n #-------------------------------#\r\n Use_Data_Loader = True\r\n #------------------------------------------------------#\r\n # 是否对损失进行归一化,用于改变loss的大小\r\n # 用于决定计算最终loss是除上batch_size还是除上正样本数量\r\n #------------------------------------------------------#\r\n normalize = False\r\n #-------------------------------#\r\n # 输入的shape大小\r\n # 显存比较小可以使用416x416\r\n # 显存比较大可以使用608x608\r\n #-------------------------------#\r\n input_shape = (416,416)\r\n\r\n #----------------------------------------------------#\r\n # classes和anchor的路径,非常重要\r\n # 训练前一定要修改classes_path,使其对应自己的数据集\r\n #----------------------------------------------------#\r\n anchors_path = 'model_data/yolo_anchors.txt'\r\n classes_path = 'model_data/voc_classes.txt' \r\n #----------------------------------------------------#\r\n # 获取classes和anchor\r\n #----------------------------------------------------#\r\n class_names = get_classes(classes_path)\r\n anchors = get_anchors(anchors_path)\r\n num_classes = len(class_names)\r\n \r\n #------------------------------------------------------#\r\n # Yolov4的tricks应用\r\n # mosaic 马赛克数据增强 True or False\r\n # Cosine_scheduler 余弦退火学习率 True or False\r\n # label_smoothing 标签平滑 0.01以下一般 如0.01、0.005\r\n #------------------------------------------------------#\r\n mosaic = True\r\n Cosine_lr = False\r\n smoooth_label = 0\r\n\r\n #------------------------------------------------------#\r\n # 创建yolo模型\r\n # 训练前一定要修改classes_path和对应的txt文件\r\n #------------------------------------------------------#\r\n model = YoloBody(len(anchors[0]), num_classes)\r\n\r\n #------------------------------------------------------#\r\n # 权值文件请看README,百度网盘下载\r\n #------------------------------------------------------#\r\n model_path = \"model_data/yolo4_weights.pth\"\r\n print('Loading weights into state dict...')\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n model_dict = model.state_dict()\r\n pretrained_dict = torch.load(model_path, map_location=device)\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}\r\n model_dict.update(pretrained_dict)\r\n model.load_state_dict(model_dict)\r\n print('Finished!')\r\n\r\n net = model.train()\r\n\r\n if Cuda:\r\n net = torch.nn.DataParallel(model)\r\n cudnn.benchmark = True\r\n net = net.cuda()\r\n\r\n # 建立loss函数\r\n yolo_losses = []\r\n for i in range(3):\r\n 
yolo_losses.append(YOLOLoss(np.reshape(anchors,[-1,2]),num_classes, \\\r\n (input_shape[1], input_shape[0]), smoooth_label, Cuda, normalize))\r\n\r\n #----------------------------------------------------#\r\n # 获得图片路径和标签\r\n #----------------------------------------------------#\r\n annotation_path = '2007_train.txt'\r\n #----------------------------------------------------------------------#\r\n # 验证集的划分在train.py代码里面进行\r\n # 2007_test.txt和2007_val.txt里面没有内容是正常的。训练不会使用到。\r\n # 当前划分方式下,验证集和训练集的比例为1:9\r\n #----------------------------------------------------------------------#\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n \r\n #------------------------------------------------------#\r\n # 主干特征提取网络特征通用,冻结训练可以加快训练速度\r\n # 也可以在训练初期防止权值被破坏。\r\n # Init_Epoch为起始世代\r\n # Freeze_Epoch为冻结训练的世代\r\n # Epoch总训练世代\r\n # 提示OOM或者显存不足请调小Batch_size\r\n #------------------------------------------------------#\r\n if True:\r\n lr = 1e-3\r\n Batch_size = 4\r\n Init_Epoch = 0\r\n Freeze_Epoch = 50\r\n \r\n optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)\r\n if Cosine_lr:\r\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)\r\n else:\r\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\r\n\r\n if Use_Data_Loader:\r\n train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)\r\n val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)\r\n gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,\r\n drop_last=True, collate_fn=yolo_dataset_collate)\r\n gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True, \r\n drop_last=True, collate_fn=yolo_dataset_collate)\r\n else:\r\n gen = Generator(Batch_size, lines[:num_train],\r\n (input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)\r\n gen_val = Generator(Batch_size, lines[num_train:],\r\n (input_shape[0], input_shape[1])).generate(train=False, mosaic = mosaic)\r\n\r\n epoch_size = max(1, num_train//Batch_size)\r\n epoch_size_val = num_val//Batch_size\r\n #------------------------------------#\r\n # 冻结一定部分训练\r\n #------------------------------------#\r\n for param in model.backbone.parameters():\r\n param.requires_grad = False\r\n\r\n for epoch in range(Init_Epoch,Freeze_Epoch):\r\n fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda)\r\n lr_scheduler.step()\r\n\r\n if True:\r\n lr = 1e-4\r\n Batch_size = 2\r\n Freeze_Epoch = 50\r\n Unfreeze_Epoch = 100\r\n\r\n optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)\r\n if Cosine_lr:\r\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)\r\n else:\r\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)\r\n\r\n if Use_Data_Loader:\r\n train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)\r\n val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)\r\n gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,\r\n drop_last=True, collate_fn=yolo_dataset_collate)\r\n gen_val = 
DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True, \r\n drop_last=True, collate_fn=yolo_dataset_collate)\r\n else:\r\n gen = Generator(Batch_size, lines[:num_train],\r\n (input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)\r\n gen_val = Generator(Batch_size, lines[num_train:],\r\n (input_shape[0], input_shape[1])).generate(train=False, mosaic = mosaic)\r\n\r\n epoch_size = max(1, num_train//Batch_size)\r\n epoch_size_val = num_val//Batch_size\r\n #------------------------------------#\r\n # 解冻后训练\r\n #------------------------------------#\r\n for param in model.backbone.parameters():\r\n param.requires_grad = True\r\n\r\n for epoch in range(Freeze_Epoch,Unfreeze_Epoch):\r\n fit_one_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda)\r\n lr_scheduler.step()\r\n" ]
[ [ "numpy.array", "torch.optim.lr_scheduler.StepLR", "numpy.reshape", "numpy.random.seed", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.no_grad", "numpy.random.shuffle", "numpy.shape", "torch.from_numpy", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.nn.DataParallel" ] ]
shreyanshsatvik/cardetection_and_localization
[ "38f70442de310abb193b53083482078c094aff2e" ]
[ "yolonew.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 27 14:15:06 2020\r\n\r\n@author: Shreyansh Satvik\r\n\"\"\"\r\nfrom keras.models import load_model\r\nfrom glob import glob\r\nimport argparse\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import imshow\r\nimport scipy.io\r\nimport scipy.misc\r\nimport numpy as np\r\nimport pandas as pd\r\nimport PIL\r\nimport tensorflow as tf\r\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\r\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\r\nfrom keras import backend as K\r\nfrom keras.layers import Input, Lambda, Conv2D\r\nfrom keras.models import load_model, Model\r\nfrom keras.models import model_from_json\r\nfrom yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes\r\nfrom yad2k.models.keras_yolo import yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body,yolo_head\r\nimport h5py\r\nfrom keras.models import Sequential\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom PIL import Image\r\nfrom keras.models import Sequential\r\nfrom utils import load_weights, Box, yolo_net_out_to_car_boxes, draw_box\r\nimport cv2\r\nimport os\r\n\r\nfrom configparser import ConfigParser\r\n\r\ndef yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):\r\n \r\n # Step 1: Compute box scores\r\n ### START CODE HERE ### (≈ 1 line)\r\n box_scores = box_confidence*box_class_probs\r\n ### END CODE HERE ###\r\n \r\n # Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score\r\n ### START CODE HERE ### (≈ 2 lines)\r\n box_classes = K.argmax(box_scores,axis=-1)\r\n box_class_scores = K.max(box_scores,axis=-1)\r\n ### END CODE HERE ###\r\n \r\n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\r\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\r\n ### START CODE HERE ### (≈ 1 line)\r\n filtering_mask = box_class_scores >= threshold\r\n ### END CODE HERE ###\r\n \r\n # Step 4: Apply the mask to box_class_scores, boxes and box_classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = tf.boolean_mask(box_class_scores,filtering_mask)\r\n boxes = tf.boolean_mask(boxes,filtering_mask)\r\n classes = tf.boolean_mask(box_classes,filtering_mask)\r\n ### END CODE HERE ###\r\n \r\n return scores, boxes, classes\r\n\r\ndef yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):\r\n\r\n \r\n max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\r\n K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor\r\n \r\n # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\r\n ### START CODE HERE ### (≈ 1 line)\r\n nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes_tensor,iou_threshold,name=None)\r\n ### END CODE HERE ###\r\n \r\n # Use K.gather() to select only nms_indices from scores, boxes and classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = K.gather(scores,nms_indices)\r\n boxes = K.gather(boxes,nms_indices)\r\n classes = K.gather(classes,nms_indices)\r\n ### END CODE HERE ###\r\n \r\n return scores, boxes, classes\r\n\r\n\r\n\r\n\r\n\r\ndef yolo_eval(yolo_outputs, image_shape , max_boxes=10, score_threshold=.6, iou_threshold=.5):\r\n \r\n \r\n # Retrieve outputs of the YOLO model (≈1 line)\r\n box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs\r\n\r\n # Convert boxes to be ready for filtering functions (convert boxes box_xy and box_wh to corner coordinates)\r\n boxes = yolo_boxes_to_corners(box_xy, box_wh)\r\n\r\n # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)\r\n scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)\r\n \r\n # Scale boxes back to original image shape.\r\n boxes = scale_boxes(boxes, image_shape)\r\n\r\n # Use one of the functions you've implemented to perform Non-max suppression with \r\n # maximum number of boxes set to max_boxes and a threshold of iou_threshold (≈1 line)\r\n scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes , iou_threshold)\r\n \r\n ### END CODE HERE ###\r\n \r\n return scores, boxes, classes\r\n\r\n\r\n\"\"\"\r\nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)))\r\nmodel.add(BatchNormalization(axis=3,name='bn0'))\r\nmodel.add(Conv2D(16, (3, 3),strides=(1,1), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(BatchNormalization(axis=3,name='bn1'))\r\n\r\nmodel.add(Conv2D(128, (2, 2), activation='relu'))\r\nmodel.add(BatchNormalization(axis=3,name='bn2'))\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(BatchNormalization(axis=3,name='bn3'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\n\"\"\"\r\n\r\n####################################\r\nyolo_model = load_model(\"yolonew.h5\")\r\n####################################################\r\nsess = K.get_session()\r\nclass_names = 
read_classes(\"coco_classes(2).txt\")\r\nanchors = read_anchors(\"yolo_anchors.txt\")\r\n\r\n\r\nimage_shape = (1280., 720.) \r\n#new_model=yolo_model.layers.pop()\r\nyolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\r\n\r\n\r\nscores, boxes, classes = yolo_eval(yolo_outputs, image_shape)\r\n\r\ndef predict(sess, image_file):\r\n \"\"\"\r\n Runs the graph stored in \"sess\" to predict boxes for \"image_file\". Prints and plots the predictions.\r\n \r\n Arguments:\r\n sess -- your tensorflow/Keras session containing the YOLO graph\r\n image_file -- name of an image stored in the \"images\" folder.\r\n \r\n Returns:\r\n out_scores -- tensor of shape (None, ), scores of the predicted boxes\r\n out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes\r\n out_classes -- tensor of shape (None, ), class index of the predicted boxes\r\n \r\n Note: \"None\" actually represents the number of predicted boxes, it varies between 0 and max_boxes. \r\n \"\"\"\r\n\r\n # Preprocess your image\r\n image, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\r\n\r\n # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.\r\n # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})\r\n ### START CODE HERE ### (≈ 1 line)\r\n out_scores, out_boxes, out_classes = sess.run(fetches=[scores,boxes,classes],feed_dict={yolo_model.input:image_data,\r\n K.learning_phase():0})\r\n ### END CODE HERE ###\r\n\r\n # Print predictions info\r\n print('Found {} boxes for {}'.format(len(out_boxes), image_file))\r\n # Generate colors for drawing bounding boxes.\r\n colors = generate_colors(class_names)\r\n # Draw bounding boxes on the image file\r\n draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)\r\n # Save the predicted bounding box on the image\r\n image.save(os.path.join(\"out\", image_file), quality=90)\r\n # Display the results in the notebook\r\n output_image = scipy.misc.imread(os.path.join(\"out\", image_file))\r\n imshow(output_image)\r\n \r\n return out_scores, out_boxes, out_classes\r\n\r\n out_scores, out_boxes, out_classes = predict(sess, \"person2.jpg\")\r\n \r\n " ]
[ [ "tensorflow.variables_initializer", "tensorflow.boolean_mask", "tensorflow.image.non_max_suppression", "matplotlib.pyplot.imshow" ] ]
lesepuise/cryptHero
[ "2d4157d5f0a3afee440bc74d2199543b50aac8f9" ]
[ "map_objects/generator.py" ]
[ "import copy\nimport random\n\nimport numpy as np\nimport tcod\n\n\nclass BSPGenerator():\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.bsp = tcod.bsp.BSP(1, 1, width - 1, height - 1)\n self.map = np.zeros(\n (width, height), dtype=bool, order=\"F\"\n )\n self.generate()\n \n def get_map(self):\n return self.map\n\n def generate(self):\n self.bsp.children = ()\n self.bsp.split_recursive(8, 10, 10, 1.5, 1.5)\n self.refresh()\n\n def refresh(self):\n self.map[...] = False\n for node in copy.deepcopy(self.bsp).inverted_level_order():\n self.traverse_node(node)\n\n def traverse_node(self, node):\n if not node.children:\n node.width -= 1\n node.height -= 1\n new_width = random.randint(min(node.width, 9), node.width)\n new_height = random.randint(min(node.height, 9), node.height)\n node.x += random.randint(0, node.width - new_width)\n node.y += random.randint(0, node.height - new_height)\n node.width, node.height = new_width, new_height\n for x in range(node.x, node.x + node.width):\n for y in range(node.y, node.y + node.height):\n self.map[x][y] = True\n else:\n left, right = node.children\n node.x = min(left.x, right.x)\n node.y = min(left.y, right.y)\n node.w = max(left.x + left.w, right.x + right.w) - node.x\n node.h = max(left.y + left.h, right.y + right.h) - node.y\n if node.horizontal:\n if left.x + left.w - 1 < right.x or right.x + right.w - 1 < left.x:\n x1 = random.randint(left.x, left.x + left.w - 1)\n x2 = random.randint(right.x, right.x + right.w - 1)\n y = random.randint(left.y + left.h, right.y)\n self.vline_up(x1, y - 1)\n self.hline(x1, y, x2)\n self.vline_down(x2, y + 1)\n else:\n minx = max(left.x, right.x)\n maxx = min(left.x + left.w - 1, right.x + right.w - 1)\n x = random.randint(minx, maxx)\n self.vline_down(x, right.y)\n self.vline_up(x, right.y - 1)\n else:\n if left.y + left.h - 1 < right.y or right.y + right.h - 1 < left.y:\n y1 = random.randint(left.y, left.y + left.h - 1)\n y2 = random.randint(right.y, right.y + right.h - 1)\n x = random.randint(left.x + left.w, right.x)\n self.hline_left(x - 1, y1)\n self.vline(x, y1, y2)\n self.hline_right(x + 1, y2)\n else:\n miny = max(left.y, right.y)\n maxy = min(left.y + left.h - 1, right.y + right.h - 1)\n y = random.randint(miny, maxy)\n self.hline_left(right.x - 1, y)\n self.hline_right(right.x, y)\n\n def vline(self, x, y1, y2):\n if y1 > y2:\n y1, y2 = y2, y1\n for y in range(y1, y2 + 1):\n self.map[x][y] = True\n\n def vline_up(self, x, y):\n while y >= 0 and not self.map[x][y]:\n self.map[x][y] = True\n y -= 1\n\n def vline_down(self, x, y):\n while y < self.height and not self.map[x][y]:\n self.map[x][y] = True\n y += 1\n\n def hline(self, x1, y, x2):\n if x1 > x2:\n x1, x2 = x2, x1\n for x in range(x1, x2 + 1):\n self.map[x][y] = True\n\n def hline_left(self, x, y):\n while x >= 0 and not self.map[x][y]:\n self.map[x][y] = True\n x -= 1\n\n def hline_right(self, x, y):\n while x < self.width and not self.map[x][y]:\n self.map[x][y] = True\n x += 1" ]
[ [ "numpy.zeros" ] ]
SysBioChalmers/Multi_scale_evolution
[ "b5f28ead733872519bc0758df034a076224c4253" ]
[ "evolution_analysis/code/ortholog_subset/Z4_Extract_non_duplicated_seq_from_OGs.py" ]
[ "#!/usr/bin/python\n\n# Note\n# The script is to analyze the duplicated gene seq for the 1011 sce sequence project.\n\nimport os\nimport pandas as pd\n\ninput0 = \"/media/luhongzhong/newdisk/Genomics_data/cds_align_unify/\"\nos.system(\"mkdir /media/luhongzhong/newdisk/Genomics_data/cds_align_unify_remove_duplicates/\")\noutfile0 = \"/media/luhongzhong/newdisk/Genomics_data/cds_align_unify_remove_duplicates/\"\n\nall_gene = os.listdir(\"/media/luhongzhong/newdisk/Genomics_data/cds_align_unify/\")\n\ngene_cluster = []\nduplicated_seq_num = []\nnon_duplicate_seq_num = []\n\nfor gene in all_gene:\n if \".phy\" in gene:\n print(gene)\n # fasta0 = input0 + \"YPR204W.phy\"\n fasta0 = input0 + gene\n s0 = open(fasta0, \"r\").readlines()\n # firstly build the id and seq dict\n general_information = s0[0].split(\" \")\n\n id_index = []\n id = []\n for i, s in enumerate(s0):\n if \" \\n\" in s:\n # print(s)\n id.append(s)\n id_index.append(i)\n id_seq_dict = {}\n for j, id0 in enumerate(id):\n # print(j)\n if j < len(id) - 1:\n index_s = id_index[j] + 1\n index_e = id_index[j + 1]\n seq_choose = s0[index_s:index_e]\n\n else:\n index_s = id_index[j] + 1\n seq_choose = s0[index_s:]\n # print(seq_choose)\n id_new = id0.strip(\"\\n\")\n id_new = id_new.strip(\" \")\n id_seq_dict[id_new] = seq_choose\n\n # check the duplicated seq\n id_all = []\n seq_all = []\n\n for key, value in id_seq_dict.items():\n value0 = \"\".join(value)\n id_all.append(key)\n seq_all.append(value0)\n\n df = pd.DataFrame({\"ID\": id_all, \"seq\": seq_all})\n duplicate0 = df[df.duplicated(['seq'], keep=False)]\n print(\"total seq:\" + str(len(seq_all)) + \"====> duplicate_seq:\" + str(len(duplicate0[\"seq\"])))\n # summarize the duplicated seq\n gene_cluster.append(gene)\n duplicated_seq_num.append(len(duplicate0[\"seq\"]))\n non_duplicate_seq_num.append(1012-len(duplicate0[\"seq\"]))\n\n # save the non duplicate ones\n new_df = df.drop_duplicates(subset=['seq'], keep=False)\n id_kept = new_df[\"ID\"].tolist()\n general_information_new = str(len(id_kept)) + \" \" + \" \".join(general_information[1:])\n file_out = outfile0 + gene\n out = open(file_out, \"w\")\n out.write(general_information_new)\n for ss0 in id_kept:\n out.write(ss0 + \" \" + \"\\n\")\n out.writelines(\"\".join(id_seq_dict[ss0]))\n out.close()\n\n\n\n\n# further summarize the duplicate seq information\nduplicate_seq = pd.DataFrame({\"cluster\": gene_cluster, \"duplicate_num\": duplicated_seq_num, \"unique_num\": non_duplicate_seq_num})\nduplicate_seq.sort_values(\"unique_num\", inplace=True)\nduplicate_seq[\"cluster\"] = duplicate_seq[\"cluster\"].str.replace(\".phy\",\"\")\n\n\nconserved_gene = duplicate_seq[duplicate_seq[\"unique_num\"] <=16] # here we just choose top 2.5%, 145 gene id\nconserved_gene_list = conserved_gene[\"cluster\"].tolist()\nprint(len(conserved_gene_list))\nconserved_gene_list0 = \",\".join(conserved_gene_list)\nprint(conserved_gene_list0)\n\n\nunconserved_gene = duplicate_seq[duplicate_seq[\"unique_num\"] >= 300] # here we just choose top 2.5%, 149 gene id\nunconserved_gene_list = unconserved_gene[\"cluster\"].tolist()\nprint(len(unconserved_gene_list))\nunconserved_gene_list0 = \",\".join(unconserved_gene_list)\nprint(unconserved_gene_list0)\n\nduplicate_seq.to_csv(\"/home/luhongzhong/Documents/R_code_for_graph/data/duplicate_gene_analysis_1011_sce.csv\")\n\n" ]
[ [ "pandas.DataFrame" ] ]
YurongYou/Hindsight
[ "94b3d2a388d357b0abf01192088f6f7d7de16aa6" ]
[ "downstream/OpenPCDet/tools/train.py" ]
[ "import argparse\nimport datetime\nimport glob\nimport os\nfrom pathlib import Path\nfrom test import repeat_eval_ckpt\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\nimport wandb\n\nfrom pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\nfrom pcdet.datasets import build_dataloader\nfrom pcdet.models import build_network, model_fn_decorator\nfrom pcdet.utils import common_utils\nfrom train_utils.optimization import build_optimizer, build_scheduler\nfrom train_utils.train_utils import train_model\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')\n\n parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')\n parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')\n parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')\n parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')\n parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')\n parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')\n parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')\n parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')\n parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')\n parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')\n parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')\n parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')\n parser.add_argument('--world_size', type=int, default=1)\n parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')\n parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')\n parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,\n help='set extra config keys if needed')\n\n parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')\n parser.add_argument('--empty_cache_every', type=int, default=-1, help='empty cuda cache every x iter')\n parser.add_argument('--start_epoch', type=int, default=0, help='')\n parser.add_argument('--save_to_file', action='store_true', default=False, help='')\n parser.add_argument(\"--wandb_project\", type=str, default=None)\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n cfg.TAG = Path(args.cfg_file).stem\n cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'\n\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs, cfg)\n\n return args, cfg\n\n\ndef main():\n args, cfg = parse_config()\n if args.launcher == 'none':\n dist_train = False\n total_gpus = 1\n else:\n total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(\n args.tcp_port, args.local_rank, world_size=args.world_size, backend='nccl'\n )\n dist_train = True\n\n if 
args.batch_size is None:\n args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU\n else:\n assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'\n args.batch_size = args.batch_size // total_gpus\n\n args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs\n\n if args.fix_random_seed:\n common_utils.set_random_seed(666)\n\n output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag\n ckpt_dir = output_dir / 'ckpt'\n output_dir.mkdir(parents=True, exist_ok=True)\n ckpt_dir.mkdir(parents=True, exist_ok=True)\n\n log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)\n\n # log to file\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n\n if dist_train:\n logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(cfg, logger=logger)\n if cfg.LOCAL_RANK == 0:\n os.system('cp %s %s' % (args.cfg_file, output_dir))\n\n if cfg.LOCAL_RANK == 0 and args.wandb_project is not None:\n # hava to create this before calling tensorboard\n wandb.init(project=args.wandb_project,\n name=f\"{cfg.EXP_GROUP_PATH}_{cfg.TAG}_{args.extra_tag}\",\n sync_tensorboard=True,\n )\n wandb.config.update(args)\n wandb.config.update(cfg)\n\n tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None\n\n # -----------------------create dataloader & network & optimizer---------------------------\n train_set, train_loader, train_sampler = build_dataloader(\n dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_train, workers=args.workers,\n logger=logger,\n training=True,\n merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,\n total_epochs=args.epochs\n )\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)\n if args.sync_bn:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.cuda()\n\n optimizer = build_optimizer(model, cfg.OPTIMIZATION)\n\n # load checkpoint if it is possible\n start_epoch = it = 0\n last_epoch = -1\n if args.pretrained_model is not None:\n model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)\n\n if args.ckpt is not None:\n it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)\n last_epoch = start_epoch + 1\n else:\n ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))\n if len(ckpt_list) > 0:\n ckpt_list.sort(key=os.path.getmtime)\n it, start_epoch = model.load_params_with_optimizer(\n ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger\n )\n last_epoch = start_epoch + 1\n\n model.train() # before wrap to DistributedDataParallel to support fixed some parameters\n if dist_train:\n model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])\n logger.info(model)\n\n # if args.merge_all_iters_to_one_epoch:\n # total_it_each_epoch = len(train_loader) // max(args.epochs, 1)\n total_iters_each_epoch = len(train_loader) if not args.merge_all_iters_to_one_epoch else len(\n train_loader) // 
args.epochs\n lr_scheduler, lr_warmup_scheduler = build_scheduler(\n optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,\n last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION\n )\n\n if cfg.LOCAL_RANK == 0 and args.wandb_project is not None:\n wandb.config.update({\"total_step\": len(train_loader) * args.epochs})\n # wandb.config.update(cfg)\n wandb.watch(model)\n\n # -----------------------start training---------------------------\n logger.info('**********************Start training %s/%s(%s)**********************'\n % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n train_model(\n model,\n optimizer,\n train_loader,\n model_func=model_fn_decorator(),\n lr_scheduler=lr_scheduler,\n optim_cfg=cfg.OPTIMIZATION,\n start_epoch=start_epoch,\n total_epochs=args.epochs,\n start_iter=it,\n rank=cfg.LOCAL_RANK,\n tb_log=tb_log,\n ckpt_save_dir=ckpt_dir,\n train_sampler=train_sampler,\n lr_warmup_scheduler=lr_warmup_scheduler,\n ckpt_save_interval=args.ckpt_save_interval,\n max_ckpt_save_num=args.max_ckpt_save_num,\n merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,\n empty_cache_every=args.empty_cache_every,\n silent_pbar=args.wandb_project is not None\n )\n\n logger.info('**********************End training %s/%s(%s)**********************\\n\\n\\n'\n % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n\n logger.info('**********************Start evaluation %s/%s(%s)**********************' %\n (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n test_set, test_loader, sampler = build_dataloader(\n dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_train, workers=args.workers, logger=logger, training=False\n )\n eval_output_dir = output_dir / 'eval' / 'eval_with_train'\n eval_output_dir.mkdir(parents=True, exist_ok=True)\n args.start_epoch = max(args.epochs - 10, 0) # Only evaluate the last 10 epochs\n\n repeat_eval_ckpt(\n model.module if dist_train else model,\n test_loader, args, eval_output_dir, logger, ckpt_dir,\n dist_test=dist_train\n )\n logger.info('**********************End evaluation %s/%s(%s)**********************' %\n (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.cuda.device_count" ] ]
victor-marchesini/covid
[ "454fd525c462c1a199322d96eed897eab8da36fe" ]
[ "_notebooks/update_srag_database.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nimport os\nimport sqlite3 as sql\nimport pandas as pd\n\nfrom srag_functions import *\n\n\n# In[2]:\n\n\n# df_srag = get_srag_data(years=[2019,2020,2021],update=False,treat=True,save_local=True)\n\nframes = []\nfor year in [2019,2020,2021]:\n df = get_srag_data(years=[year],update=True,treat=True,save_local=True)\n df['ano'] = year\n frames.append(df)\n \ndf_srag = pd.concat(frames)\n\n\n\n# In[3]:\n\nprint('df_srag.shape:',df_srag.shape)\n\n\n# In[5]:\n\nif not os.path.exists('data/opendatasus'):\n os.mkdir('data/opendatasus')\n \ndb_name = 'srag'\ndb_path = f'data/opendatasus/{db_name}.db'\n\nconn = sql.connect(db_path)\ndf_srag.to_sql(db_name, conn, index=False, if_exists='replace')\n\nprint(f'data base saved as {db_name}.db')\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.concat" ] ]
ruanyangry/Machine-Learning-in-Materials-Characterization
[ "ac00fddd1dee0745079deefd4e13a4ea0d631815" ]
[ "FFT-Filter-STEM/scripts/pycroscopy-fft.py" ]
[ "# _*_ coding:utf-8 _*_\r\n\r\n'''\r\nAuthor: Ruan Yang\r\nEmail: [email protected]\r\n\r\nReference: https://pycroscopy.github.io/pycroscopy/auto_examples/\\\r\nplot_fft_2d_filtering.html#sphx-glr-auto-examples-plot-fft-2d-filtering-py\r\n\r\nPurpose: FFT & Filtering of Atomically Resolved Images\r\n\r\nFFT advantage: The power of the Fast Fourier Transform (FFT) is due in \\\r\npart to (as the name suggests) its speed and also to the fact that \\\r\ncomplex operations such as convolution and differentiation/integration \\\r\nare made much simpler when performed in the Fourier domain\r\n\r\nExamples:\r\n\r\n1. Load a image\r\n2. Fourier transform it\r\n3. Apply a smoothing filter\r\n4. Transform it back\r\n'''\r\n\r\n# First make sure the necessary module installed?\r\n\r\nfrom __future__ import division,unicode_literals,print_function\r\n\r\n# plot\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# numpy\r\n\r\nimport numpy as np\r\nimport numpy.fft as npf\r\n\r\n# system\r\n\r\nimport os\r\nimport subprocess\r\nimport sys\r\n\r\n# defined install function\r\n\r\ndef install(package):\r\n\tsubprocess.call([sys.executable,\"-m\",\"pip\",\"install\",package])\r\n\t\r\n# try ... except\r\n\r\ntry:\r\n\timport wget\r\nexcept ImportError:\r\n\tprint(\"wget not found. will install with pip.\")\r\n\timport pip\r\n\tinstall(\"wget\")\r\n\timport wget\r\n\t\r\ntry:\r\n\timport pyUSID as usid\r\nexcept ImportError:\r\n\tprint(\"pyUSID not found. will install with pip\")\r\n\timport pip\r\n\tinstall(\"pyUSID\")\r\n\timport pyUSID as usid\r\n\t\r\n# Download input data file.\r\n\r\ndata_file_path=\"temp_STEM_STO.txt\"\r\n\r\nurl=\"https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/STEM_STO_2_20.txt\"\r\n\r\n_=wget.download(url,data_file_path,bar=None)\r\n#_=wget.download(url,data_file_path,bar=True)\r\n\r\n# Get the data stored in download file.\r\n\r\nimage_raw=np.loadtxt(data_file_path,dtype=\"str\",delimiter=\"\\t\")\r\n\r\n# delete the temporarily downloaded file:\r\n\r\nos.remove(data_file_path)\r\n\r\n# convert the file from a string array to a numpy array of floating point numbers\r\n\r\nimage_raw=np.array(image_raw)\r\nimage_raw=image_raw[0:,0:-1].astype(np.float)\r\n#image_raw=image_raw[:,:].astype(np.float) --- wrong\r\n\r\nprint(\"#----------------------------------------#\")\r\nprint(\" raw data shape : = {}\".format(image_raw.shape))\r\nprint(\"#----------------------------------------#\")\r\nprint(\"\\n\")\r\n\r\n# subtract out the mean of the image\r\n\r\nimage_raw=image_raw-np.mean(image_raw)\r\n\r\n# keeping track of units between transformations\r\n\r\nx_pixels,y_pixels=np.shape(image_raw)\r\nx_edge_length=5.0 # nm\r\ny_edge_length=5.0 # nm\r\nx_sampling=x_pixels/x_edge_length\r\ny_sampling=y_pixels/y_edge_length\r\nx_axis_vec=np.linspace(-x_edge_length/2.0,x_edge_length/2.0,x_pixels)\r\ny_axis_vec=np.linspace(-y_edge_length/2.0,y_edge_length/2.0,y_pixels)\r\nx_mat,y_mat=np.meshgrid(x_axis_vec,y_axis_vec)\r\n\r\n# The axes in the Fourier domain are defined below\r\n\r\nu_max=x_sampling/2\r\nv_max=y_sampling/2\r\nu_axis_vec=np.linspace(-u_max/2,u_max/2,x_pixels)\r\nv_axis_vec=np.linspace(-v_max/2,v_max/2,y_pixels)\r\n\r\n# below: matrices of u-positions and v-positions\r\nu_mat,v_mat=np.meshgrid(u_axis_vec,v_axis_vec)\r\n\r\n# STEM image of STO\r\n\r\nfig,axis=plt.subplots(figsize=(5,5))\r\n_=usid.plot_utils.plot_map(axis,image_raw,cmap=plt.cm.inferno,clim=[0,6],\\\r\nx_vec=x_axis_vec,y_vec=y_axis_vec,num_ticks=5)\r\naxis.set_title(\"original image of STO captured 
via STEM\")\r\nplt.savefig(\"fft-1.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# FFT transform by numpy.fft2\r\n\r\nfft_image_raw=npf.fft2(image_raw)\r\n\r\n# Plotting the magnitude 2D-FFT on a vertical log scales shows something\\\r\n# unexpected: there appears to be peaks at the corners and no information\\\r\n# at the center. This is because the output for the 'fft2' function flips\\\r\n# the frequency axes so that low frequencies are at the ends, and the \\\r\n# highest frequency is in the middle.\r\n\r\nfig,axis=plt.subplots(figsize=(5,5))\r\n_=usid.plot_utils.plot_map(axis,np.abs(fft_image_raw),cmap=plt.cm.OrRd,\\\r\nclim=[0,3E+3])\r\naxis.set_title(\"FFT2 of Image\")\r\nplt.savefig(\"fft-2.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# To correct this, use the 'fftshift' command. fftshift brings the lowest\\\r\n# frequency components of the FFT back to the center of the plot\r\n\r\nfft_image_raw=npf.fftshift(fft_image_raw)\r\nfft_abs_image_raw=np.abs(fft_image_raw)\r\n\r\ndef crop_center(image,cent_size=128):\r\n\treturn image[image.shape[0]//2-cent_size//2:image.shape[0]//2+cent_size//2,\\\r\n\timage.shape[1]//2-cent_size//2:image.shape[1]//2+cent_size//2]\r\n\t\r\n# After the fftshift, the FFT looks right\r\n\r\nfig,axes=plt.subplots(ncols=2,figsize=(10,5))\r\nfor axis,img,title in zip(axes,[fft_abs_image_raw,crop_center(fft_abs_image_raw)],\\\r\n[\"FFT after fftshift-ing\",\"Zoomed view around origin\"]):\r\n\t_=usid.plot_utils.plot_map(axis,img,cmap=plt.cm.OrRd,clim=[0,1E+4])\r\n\taxis.set_title(title)\r\n\r\nfig.tight_layout()\r\n\r\nplt.savefig(\"fft-3.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# redefine the Fourier domain in polar coordinates to make building the \\\r\n# radially symmetric function easier.\r\n# convert cartesian coordinates to polar radius\r\n\r\nr=np.sqrt(u_mat**2+v_mat**2)\r\n\r\n# An expression for the filter is given below. \r\n# Note, the width of the filter is defined in terms of the real space dimensions for ease of use.\r\n\r\n# inverse width of gaussian, units same as real space axes\r\n\r\nfilter_width=0.15\r\ngauss_filter=np.e**(-(r*filter_width)**2)\r\n\r\nfig,axes=plt.subplots(ncols=2,figsize=(10,5))\r\n_=usid.plot_utils.plot_map(axes[0],gauss_filter,cmap=plt.cm.OrRd)\r\naxes[0].set_title(\"Gaussian Filter\")\r\naxes[1].plot(gauss_filter[gauss_filter.shape[0]//2])\r\naxes[1].set_title(\"Cross section of filter\")\r\nfig.tight_layout()\r\nplt.savefig(\"fft-4.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# Application of the filter to the data in the Fourier domain is done \\\r\n# simply by dot-multiplying the two matrices.\r\n\r\nF_m1_filtered=gauss_filter*fft_image_raw\r\n\r\n# To view the filtered data in the space domain, simply use the inverse fast Fourier transform ('ifft2'). 
\r\n\r\nimage_filtered=npf.ifft2(npf.ifftshift(F_m1_filtered))\r\nimage_filtered=np.real(image_filtered)\r\n\r\nfig,axes=plt.subplots(ncols=2,figsize=(10,5))\r\nfor axis,img,title in zip(axes,[image_raw,image_filtered],['original','filtered']):\r\n\t_=usid.plot_utils.plot_map(axis,img,cmap=plt.cm.inferno,x_vec=x_axis_vec,y_vec=y_axis_vec,\\\r\n\tnum_ticks=5)\r\n\taxis.set_title(title)\r\n\r\nfig.tight_layout()\r\n\r\nplt.savefig(\"fft-5.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# artificially add a background to the original image\r\n\r\nbackground_distottion=0.2*(x_mat+y_mat+np.sin(2*np.pi*x_mat/x_edge_length))\r\nimage_w_background=image_raw+background_distottion\r\n\r\nfig,axes=plt.subplots(figsize=(10,5),ncols=2)\r\nfor axis,img,title in zip(axes,[background_distottion,image_w_background],\\\r\n['background', 'image with background']):\r\n\t_=usid.plot_utils.plot_map(axis,img,cmap=plt.cm.inferno,x_vec=x_axis_vec,\\\r\n\ty_vec=y_axis_vec,num_ticks=5)\r\n\taxis.set_title(title)\r\n\r\nfig.tight_layout()\r\n\r\nplt.savefig(\"fft-6.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# inverse width of gaussian, units same as real space axes\r\n\r\nfilter_width=2\r\ninverse_gaus_filter=1-np.e**(-(r*filter_width)**2)\r\n\r\nfig,axis=plt.subplots()\r\n_=usid.plot_utils.plot_map(axis,inverse_gaus_filter,cmap=plt.cm.OrRd)\r\naxis.set_title(\"background filter\")\r\nplt.savefig(\"fft-7.jpg\",dpi=300)\r\nplt.show()\r\n\r\n# Let perform the same process of taking the FFT of the image\\\r\n# multiplying with the filter and taking the inverse Fourier transform\\\r\n# of the image to get the filtered image.\r\n\r\n# take the fft of the image\r\n\r\nfft_image_w_background=npf.fftshift(npf.fft2(image_w_background))\r\nfft_abs_image_background=np.abs(fft_image_w_background)\r\n\r\n# Apply the filter\r\nfft_image_corrected=fft_image_w_background*inverse_gaus_filter\r\n\r\n# perform the inverse fourier transform on the filtered data\r\nimage_corrected = np.real(npf.ifft2(npf.ifftshift(fft_image_corrected)))\r\n\r\n# find what was removed from the image by filtering\r\nfiltered_background = image_w_background - image_corrected\r\n\r\nfig,axes=plt.subplots(ncols=2,figsize=(10,5))\r\nfor axis,img,title in zip(axes,[image_corrected,filtered_background],\\\r\n[\"image with background subtracted\",\"background component that was removed\"]):\r\n\t_=usid.plot_utils.plot_map(axis,img,cmap=plt.cm.inferno,x_vec=x_axis_vec,\\\r\n\ty_vec=y_axis_vec,num_ticks=5)\r\n\taxis.set_title(title)\r\n\t\r\nfig.tight_layout() \r\nplt.savefig(\"fft-8.jpg\",dpi=300)\r\nplt.show()\r\n\t\r\n\t\r\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.fft.fft2", "matplotlib.pyplot.savefig", "numpy.fft.ifftshift", "numpy.real", "numpy.shape", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.loadtxt", "numpy.abs", "numpy.sqrt", "numpy.fft.fftshift", "matplotlib.pyplot.show", "numpy.linspace", "numpy.meshgrid" ] ]
guildai/guild-cli
[ "d3db493fb7a4952a334684e36578dd4b18afa124" ]
[ "examples/tensorflow-versions/summary1.py" ]
[ "# Uses TF 1.x summary writer to log a scalar.\n#\n# Guild should extend the logged data with system scalars.\n\nimport sys\n\nimport tensorflow as tf\n\nassert len(sys.argv) >= 2, \"usage: summary1.py LOGDIR\"\n\nwriter = tf.summary.FileWriter(sys.argv[1])\n\n\ndef scalar_summary(tag, val):\n return tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=val)])\n\n\nwriter.add_summary(scalar_summary(\"x\", 1.0), 1)\nwriter.add_summary(scalar_summary(\"x\", 2.0), 2)\nwriter.add_summary(scalar_summary(\"x\", 3.0), 3)\nwriter.close()\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.Summary.Value" ] ]
suhitaghosh10/UATS
[ "fe295ca2e16e1b7404398b3b62e404778900d958", "fe295ca2e16e1b7404398b3b62e404778900d958" ]
[ "dataset_specific/kits/generate_data_uats.py", "dataset_specific/prostate/expts/prostate_temporal.py" ]
[ "import os\nfrom shutil import copyfile\n\nimport numpy as np\n\nfrom dataset_specific.kits import utils\n\nroot_path = '/cache/suhita/data/kits/'\nperc = 1.0\n# training\n\n\nfold_num = 1\nlabelled_path = '/data/suhita/temporal/kits/preprocessed_labeled_train'\nlabelled_fold_num = '/data/suhita/temporal/kits/Folds/train_fold' + str(fold_num) + '.npy'\nun_labelled_path = '/data/suhita/temporal/kits/output/UL_' + str(perc) + '_PP/'\nlabelled_train_num = np.load(labelled_fold_num).shape[0]\n\nlabelled_num = np.load(labelled_fold_num).shape[0]\nlabelled_files_lst = np.load(labelled_fold_num)\nun_labelled_files_lst = os.listdir(un_labelled_path)\n\ntrain_fold = np.load('/data/suhita/temporal/kits/Folds/train_fold' + str(fold_num) + '.npy')\nprint(train_fold[0:10])\nnr_samples = train_fold.shape[0]\n\n# np.random.seed(5)\nnp.random.seed(5)\nnp.random.shuffle(train_fold)\nprint(train_fold[0:10])\n\nlabelled_num_considrd = train_fold[:int(nr_samples * perc)]\nremaining_labelled = set(train_fold).difference(set(labelled_num_considrd))\n\ncounter = 0\n\ndata_path = '/cache/suhita/data/kits/fold_' + str(fold_num) + '_P' + str(perc)\nutils.makedir(data_path)\nfor i in labelled_num_considrd:\n # name = labelled_files_lst[i]\n print(i, counter)\n fold_name = 'case_' + str(counter)\n utils.makedir(os.path.join(data_path, fold_name))\n copyfile(os.path.join(labelled_path, i, 'img_left.npy'), os.path.join(data_path, fold_name, 'img_left.npy'))\n copyfile(os.path.join(labelled_path, i, 'img_right.npy'),\n os.path.join(data_path, fold_name, 'img_right.npy'))\n copyfile(os.path.join(labelled_path, i, 'segm_left.npy'),\n os.path.join(data_path, fold_name, 'segm_left.npy'))\n copyfile(os.path.join(labelled_path, i, 'segm_right.npy'),\n os.path.join(data_path, fold_name, 'segm_right.npy'))\n counter = counter + 1\n\nprint('remianing labelled')\nfor i in remaining_labelled:\n # name = labelled_files_lst[i]\n print(i, counter)\n fold_name = 'case_' + str(counter)\n utils.makedir(os.path.join(data_path, fold_name))\n copyfile(os.path.join(labelled_path, i, 'img_left.npy'), os.path.join(data_path, fold_name, 'img_left.npy'))\n copyfile(os.path.join(labelled_path, i, 'img_right.npy'),\n os.path.join(data_path, fold_name, 'img_right.npy'))\n copyfile(os.path.join(labelled_path, i, 'segm_left.npy'),\n os.path.join(data_path, fold_name, 'segm_left.npy'))\n copyfile(os.path.join(labelled_path, i, 'segm_right.npy'),\n os.path.join(data_path, fold_name, 'segm_right.npy'))\n counter = counter + 1\n\nprint('unlabelled start...')\n\nfor i in np.arange(len(un_labelled_files_lst)):\n name = un_labelled_files_lst[i]\n print(name, counter)\n fold_name = 'case_' + str(counter)\n utils.makedir(os.path.join(data_path, fold_name))\n copyfile(os.path.join(un_labelled_path, name, 'img_left.npy'),\n os.path.join(data_path, fold_name, 'img_left.npy'))\n copyfile(os.path.join(un_labelled_path, name, 'img_right.npy'),\n os.path.join(data_path, fold_name, 'img_right.npy'))\n copyfile(os.path.join(un_labelled_path, name, 'segm_left.npy'),\n os.path.join(data_path, fold_name, 'segm_left.npy'))\n copyfile(os.path.join(un_labelled_path, name, 'segm_right.npy'),\n os.path.join(data_path, fold_name, 'segm_right.npy'))\n counter = counter + 1\n", "import argparse\nimport os\n\nimport tensorflow as tf\n\nfrom dataset_specific.prostate.model.temporal_original import weighted_model\nfrom train.semi_supervised.temporal_original import train\nfrom utility.config import get_metadata\nfrom utility.constants import *\nfrom utility.utils import 
cleanup\n\n## Parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-g', '--gpu_num', type=str, default='0', help='GPU Number')\nparser.add_argument('-f', '--fold_num', type=int, default=1, help='Fold Number')\nparser.add_argument('-e', '--ens_folder_name', type=str, help='ensemble folder name')\nparser.add_argument('-d', '--ds', type=str, default=PROSTATE_DATASET_NAME, help='dataset name')\n\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.allow_soft_placement = True\n\ntry:\n args = parser.parse_args()\n fold_num = args.fold_num\n perc = args.perc\n temp_path = args.temp_path\n gpu_num = args.gpu_num\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_num\n metadata = get_metadata(args.ds, fold_num, perc)\n # Build Model\n wm = weighted_model()\n train(None, None,\n dataset_name=args.ds,\n ens_folder_name=temp_path,\n labelled_perc=perc,\n fold_num=fold_num,\n model_type=wm\n )\n\nfinally:\n if os.path.exists(metadata[m_root_temp_path] + temp_path):\n cleanup(metadata[m_root_temp_path] + temp_path)\n print('clean up done!')\n" ]
[ [ "numpy.random.seed", "numpy.load", "numpy.random.shuffle" ], [ "tensorflow.compat.v1.ConfigProto" ] ]
ichcanziho/SentimentAnalysisForClimateChange
[ "7e1d7dc95b4db5fac1fbef97c1adf20041a29717" ]
[ "core/corr_caus.py" ]
[ "###############################################################################\n# Functions to perform Spearman N-lag cross correlation of the sentiment time #\n# lines. Saves the obtained correlation matrices as SVG files. Also obtains #\n# the Granger causality between the time series. #\n# #\n# @author Jorge Ciprián #\n# Last updated: 21-05-2021. #\n###############################################################################\n\n# Imports.\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport pandasql as ps\nfrom pylab import rcParams\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.api import VAR\nfrom statsmodels.stats.stattools import durbin_watson\nfrom statsmodels.tsa.stattools import grangercausalitytests\nfrom statsmodels.tsa.stattools import adfuller, zivot_andrews\n\n# Main class. Contains the methods for N-lag Spearman cross-correlation and\n# Granger causality.\nclass CorrCausAnalyzer():\n def __init__(self, in_path_tweets, in_path_news, out_path):\n # Assigning attributes.\n self.in_dir_tweets = in_path_tweets\n self.in_dir_news = in_path_news\n self.out_dir = out_path\n\n # Method for loading the datasets and doing the required pre-processing\n # for correlation and causality analysis.\n def load_datasets(self):\n print(\"Loading datasets...\")\n # Getting source paths.\n news_source_path = self.in_dir_news\n tweets_source_dir = self.in_dir_tweets\n # Loading dataframes.\n news_df = pd.read_csv(news_source_path)\n tweets_df = pd.read_csv(tweets_source_dir)\n print(\"... done.\")\n\n # For the news dataset.\n print(\"Pre-processing news...\")\n # Dropping unnecessary columns.\n news_df = news_df.drop(['Show','Snippet','NOUN','ADJ','VERB','PROPN',\n 'SENT','SIZE','blob score', 'vader score'],1)\n # Defining query for generating the time series.\n query_news = \"\"\"SELECT year, month,\n SUM(CASE WHEN `vader category`='pos' THEN 1 ELSE 0 END) AS pos_count,\n SUM(CASE WHEN `vader category`='neg' THEN 1 ELSE 0 END) AS neg_count,\n SUM(CASE WHEN `vader category`='neu' THEN 1 ELSE 0 END) AS neu_count\n FROM news_df\n GROUP BY year, month;\"\"\"\n # Applying query and getting results.\n result_news = ps.sqldf(query_news, locals())\n # Generating date index.\n result_news[\"date\"] = pd.to_datetime(result_news['year'].astype(str) + \"-\" + result_news['month'].astype(str) + \"-1\", format='%Y-%m-%d')\n # Dropping month and year.\n result_news = result_news.drop(['year','month'],1)\n # Setting date as index.\n result_news = result_news.set_index('date')\n # We don't have full data for the first and last months: remove them.\n result_news = result_news.loc['2015-05-01':'2018-01-31']\n # Getting the column list.\n news_col_list = result_news.columns.tolist()\n print(\"... 
done.\")\n\n # For the Tweets dataset.\n print(\"Pre-processing tweets...\")\n # Dropping unnecessary columns.\n tweets_df = tweets_df.drop(['i','id','day','tweet','NOUN','ADJ','VERB',\n 'PROPN','SENT','SIZE','blob score', 'vader score'],1)\n # Defining query for generating the time series.\n query_tweets = \"\"\"SELECT year, month,\n SUM(CASE WHEN `vader category`='pos' THEN 1 ELSE 0 END) AS pos_count,\n SUM(CASE WHEN `vader category`='neg' THEN 1 ELSE 0 END) AS neg_count,\n SUM(CASE WHEN `vader category`='neu' THEN 1 ELSE 0 END) AS neu_count\n FROM tweets_df\n GROUP BY year, month;\"\"\"\n # Applying query and getting results.\n result_tweets = ps.sqldf(query_tweets, locals())\n # Generating date index.\n result_tweets[\"date\"] = pd.to_datetime(result_tweets['year'].astype(str) + \"-\" + result_tweets['month'].astype(str) + \"-1\", format='%Y-%m-%d')\n # Dropping month and year.\n result_tweets = result_tweets.drop(['year','month'],1)\n # Setting date as index.\n result_tweets = result_tweets.set_index('date')\n # We don't have full data for the first and last months: remove them.\n result_tweets = result_tweets.loc['2015-05-01':'2018-01-31']\n # Getting the column list.\n tweets_col_list = result_tweets.columns.tolist()\n print(\"... done.\")\n\n\n # Generating sentiment dataframes with news and tweets.\n pos_df = result_tweets[[tweets_col_list[0]]].join(result_news[[news_col_list[0]]],\n lsuffix='_tweets',\n rsuffix='_news')\n # Negative sentiment.\n neg_df = result_tweets[[tweets_col_list[1]]].join(result_news[[news_col_list[1]]],\n lsuffix='_tweets',\n rsuffix='_news')\n # Neutral sentiment.\n neu_df = result_tweets[[tweets_col_list[2]]].join(result_news[[news_col_list[2]]],\n lsuffix='_tweets',\n rsuffix='_news')\n # Returning readied dataframes.\n return pos_df, neg_df, neu_df\n\n # Method that checks for stationarity in the time series.For the news\n # time series, it does the ADF test. For the tweets dataset it performs the\n # Zivot-Andrews test to account for the potential structural break in this\n # time series. Assumes it receives the dataframes generated by the\n # load_datasets method; a dataframe with datetime index, a column for tweets\n # and a column of news, in that order.\n def check_stationarity(self, df):\n # Initializing flags.\n flag_news = False\n flag_tweets = False\n # Getting column list.\n col_list = df.columns.tolist()\n # Checking for tweets.\n result_tweets = zivot_andrews(df[col_list[0]])\n # Checking for news.\n result_news = adfuller(df[col_list[1]])\n\n # Displaying results.\n print(\"Result for news:\")\n print(\"P-value: \", result_news[1])\n if(result_news[1]<=0.05):\n flag_news = True\n print(\"Stationary: True\")\n else:\n print(\"Stationary: False\")\n print(\"Result for tweets:\")\n print(\"P-value: \", result_tweets[1])\n if(result_tweets[1]<=0.05):\n flag_tweets = True\n print(\"Stationary: True\")\n else:\n print(\"Stationary: False\")\n print(\"--------------------------------------\")\n return flag_news, flag_tweets\n\n # Method to differentiate a time series to make it stationary. You may\n # need to call this method more than once until the stationary test\n # checks out. 
Performs the operation in the indicated column and adjusts\n # all the dataframe to eliminate NA values.\n def diff_df(self, df, col_name):\n # Generating an internal copy of the dataframe.\n df_trans = df.copy()\n # Differentiating.\n df_trans[col_name] = df_trans[col_name].diff()\n # Dropping rows with NA values from the dataframe.\n df_trans = df_trans.dropna()\n return df_trans\n\n # Method that searches for the best max lag value for the VAR model while\n # ensuring it stays in within a given margin of the Durbin-Watson ideal\n # test scores. Takes into account the AIC as information metric. It assumes\n # stationary data.\n def best_lag_dw(self, df, threshold=0.2):\n model = VAR(df, freq=\"MS\")\n # Assumes stationary data.\n best_aic = 99999\n best_lag = None\n best_dw = None\n # Searching for best lag order.\n for i in range(1,16):\n result = model.fit(i)\n #print(\"Lag order: \", i, \" AIC: \", result.aic)\n # Checking with Durbin-Watson test for autocorrelation as well.\n dw_out = durbin_watson(result.resid)\n #print(\"DW test: \", dw_out)\n #print(abs(2.0-dw_out[0]))\n if((result.aic < best_aic) and (abs(2.0-round(dw_out[0],2))<=threshold) and (abs(2.0-round(dw_out[1],2))<=threshold)):\n #print(\"ENTRA\")\n best_aic = result.aic\n best_lag = i\n best_dw = dw_out\n print(\"Best lag order: \", best_lag, \" with an AIC score of: \", best_aic)\n print(\"Durbin-Watson results:\")\n for col, val in zip(df.columns, best_dw):\n print(col, ':', round(val, 2))\n print(\"-------------------------------------------------\")\n return best_aic, best_lag, best_dw\n\n # Method that performs the Granger causality test.\n def grangers_causation_matrix(self, data, variables, maxlag, test='ssr_chi2test', verbose=False):\n # Obtaining the results matrix.\n df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)\n for c in df.columns:\n for r in df.index:\n test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag, verbose=False)\n p_values = [round(test_result[i+1][0][test][1],4) for i in range(maxlag)]\n if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}')\n min_p_value = np.min(p_values)\n df.loc[r, c] = min_p_value\n df.columns = [var + '_x' for var in variables]\n df.index = [var + '_y' for var in variables]\n # Providing feedback.\n print(\"Granger causality test results: \")\n print(df)\n print(\"-------------------------------------------------\")\n return df\n\n # Method to generate the N-lagged dataframe for cross-correlation.\n def df_derived_by_shift(self, df, lag=0, NON_DER=[]):\n df = df.copy()\n if not lag:\n return df\n cols ={}\n for i in range(1,lag+1):\n for x in list(df.columns):\n if x not in NON_DER:\n if not x in cols:\n cols[x] = ['{}_{}'.format(x, i)]\n else:\n cols[x].append('{}_{}'.format(x, i))\n for k,v in cols.items():\n columns = v\n dfn = pd.DataFrame(data=None, columns=columns, index=df.index)\n i = 1\n for c in columns:\n dfn[c] = df[k].shift(periods=i)\n i+=1\n df = pd.concat([df, dfn], axis=1)\n return df\n\n # Method to generate the N-lag Spearman cross-correlation, saving the\n # correlation matrices as SVG files. 
\"n\" refers to the number of lags.\n def n_lag_corr(self, df, filename, n=6):\n lag_df = self.df_derived_by_shift(df, n)\n corr_df = lag_df.corr(method='spearman')\n # Plotting and saving SVG file.\n plt.figure(figsize=(25,25))\n title = str(n) + \" months\"\n plt.title(title, y=1.05, size=16)\n mask = np.zeros_like(corr_df)\n mask[np.triu_indices_from(mask)] = True\n svm = sns.heatmap(corr_df, mask=mask, linewidths=0.1,vmax=1.0, vmin=-1.0,\n square=True, cmap='coolwarm', linecolor='white', annot=True)\n img_path = self.out_dir + filename\n plt.savefig(img_path, bbox_inches='tight')\n" ]
[ [ "numpy.zeros_like", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.title", "numpy.triu_indices_from", "numpy.min", "matplotlib.pyplot.figure", "pandas.concat", "pandas.read_csv" ] ]
MarioCSilva/Space_Saving_Count
[ "eee32865e390bc10d81881cff836557e679fe261" ]
[ "src/tests.py" ]
[ "from collections import defaultdict\nfrom exact_counter import ExactCounter\nfrom space_saving_counter import SpaceSavingCounter\nimport time\nfrom math import sqrt\nfrom tabulate import tabulate\nfrom utils import *\nimport matplotlib.pyplot as plt\n\n\nclass Test():\n def __init__(self, fname=\"datasets/en_bible.txt\", stop_words_fname=\"./stopwords.txt\", epsilons=[0.0002, 0.0005, 0.0008, 0.001, 0.002], k=200):\n self.fname = fname\n self.stop_words_fname = stop_words_fname\n self.epsilons = sorted(epsilons, reverse=True)\n\n min_k = int(1 / max(epsilons))\n self.k = min_k if k > min_k else k\n\n self.run_test()\n\n\n def run_test(self):\n exact_counter, space_saving_counter =\\\n ExactCounter(self.fname, self.stop_words_fname), SpaceSavingCounter(self.fname, self.stop_words_fname)\n\n self.get_stats(exact_counter, exact_counter=True)\n self.get_stats(space_saving_counter)\n\n\n def get_stats(self, counter, exact_counter=False):\n print(f\"{counter}\\n\")\n\n plot_data = [[], [], [], [], []]\n headers = [\"Measure\"]\n data = [[\"Time\"], [\"Total Words\"], [\"Events\"], [\"Mean\"],\\\n [\"Minimum\"], [\"Maximum\"]]\n\n if not exact_counter:\n data.extend([[\"Accuracy\"], [\"Precision\"], [\"Avg. Precision\"]])\n for epsilon in self.epsilons:\n counter.epsilon = epsilon\n tic = time.time()\n counter.count()\n exec_time = round(time.time() - tic, 2)\n\n total_events = sum(counter.word_counter.values())\n total_words = len(counter.word_counter)\n min_events = min(counter.word_counter.values())\n max_events = max(counter.word_counter.values())\n mean = calc_mean(counter.word_counter.values())\n\n headers.append(f\"ɛ {epsilon}\")\n data[0].append(exec_time)\n data[1].append(total_words)\n data[2].append(total_events)\n data[3].append(mean)\n data[4].append(min_events)\n data[5].append(max_events)\n\n plot_data[0].append(epsilon)\n plot_data[1].append(exec_time)\n\n relative_precision, right_position_words, TP = 0, 0, 0\n top_words = counter.sort_words()[:self.k]\n for i, word in enumerate(self.exact_top_k_words):\n if word in top_words:\n TP += 1\n if word == top_words[i]:\n right_position_words += 1\n relative_precision += right_position_words / (i + 1) \n avg_relative_precision = round(relative_precision / self.k * 100, 2)\n FP = self.k - TP\n TN = self.total_words - self.k - FP\n precision = round(TP / self.k * 100, 2)\n # recall is equal to precision in this case since\n # it is \"retrieved\" the same amount of words (k)\n # therefore the denominator is the same\n accuracy = round((TP + TN) / self.total_words * 100, 2)\n\n data[6].append(accuracy)\n data[7].append(precision)\n data[8].append(avg_relative_precision)\n plot_data[2].append(accuracy)\n plot_data[3].append(precision)\n plot_data[4].append(avg_relative_precision)\n\n print(tabulate(data, headers=headers))\n\n plt.plot(plot_data[0], plot_data[1], label=\"Execution Time\")\n plt.ylabel(\"Time (s)\")\n plt.xlabel(\"Epsilon\")\n plt.xticks(plot_data[0])\n plt.title(counter)\n plt.legend()\n plt.show()\n\n plt.plot(plot_data[0], plot_data[2], label=\"Accuracy (%)\", linewidth=3)\n plt.plot(plot_data[0], plot_data[3], label=\"Precision (%)\")\n plt.plot(plot_data[0], plot_data[4], label=\"Average Precision (%)\")\n plt.ylabel(\"Percentage (%)\")\n plt.xlabel(\"Epsilon\")\n plt.xticks(plot_data[0])\n plt.title(counter)\n plt.legend()\n plt.show()\n return\n\n tic = time.time()\n counter.count()\n exec_time = round(time.time() - tic, 3)\n self.exact_top_k_words = counter.sort_words()[:self.k]\n self.total_words = 
len(counter.word_counter)\n total_events = sum(counter.word_counter.values())\n min_events = min(counter.word_counter.values())\n max_events = max(counter.word_counter.values())\n mean = calc_mean(counter.word_counter.values())\n\n headers.append(\"Value\")\n data[0].append(exec_time)\n data[1].append(self.total_words)\n data[2].append(total_events)\n data[3].append(mean)\n data[4].append(min_events)\n data[5].append(max_events)\n\n print(f\"{tabulate(data, headers=headers)}\\n\")" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks" ] ]
yhhhli/pytorch-cifar
[ "699c68a3b77e780591539d1a5d1a1d96bc63d0fb" ]
[ "weight_scale.py" ]
[ "from torch.nn.parameter import Parameter\nfrom torch import _weight_norm, norm_except_dim\nimport torch\n\nclass WeightScale(object):\n def __init__(self, name, dim):\n if dim is None:\n dim = -1\n self.name = name\n self.dim = dim\n\n def compute_weight(self, module):\n g = getattr(module, self.name + '_g')\n v = getattr(module, self.name + '_v')\n return v.mul(g.expand_as(v))\n\n @staticmethod\n def apply(module, name, dim):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, WeightScale) and hook.name == name:\n raise RuntimeError(\"Cannot register two weight_norm hooks on \"\n \"the same parameter {}\".format(name))\n\n if dim is None:\n dim = -1\n\n fn = WeightScale(name, dim)\n\n weight = getattr(module, name)\n\n # remove w from parameter list\n del module._parameters[name]\n\n # add g and v as new parameters and express w as g/||v|| * v\n module.register_parameter(name + '_g', Parameter(torch.ones(weight.size()[0].reshape(-1,1,1,1))))\n module.register_parameter(name + '_v', Parameter(weight.data))\n setattr(module, name, fn.compute_weight(module))\n\n # recompute weight before every forward()\n module.register_forward_pre_hook(fn)\n\n return fn\n\n def remove(self, module):\n weight = self.compute_weight(module)\n delattr(module, self.name)\n del module._parameters[self.name + '_g']\n del module._parameters[self.name + '_v']\n module.register_parameter(self.name, Parameter(weight.data))\n\n def __call__(self, module, inputs):\n setattr(module, self.name, self.compute_weight(module))\n\n\ndef weight_scale(module, name='weight', dim=0):\n r\"\"\"Applies weight normalization to a parameter in the given module.\n .. math::\n \\mathbf{w} = g \\dfrac{\\mathbf{v}}{\\|\\mathbf{v}\\|}\n Weight normalization is a reparameterization that decouples the magnitude\n of a weight tensor from its direction. This replaces the parameter specified\n by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude\n (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``).\n Weight normalization is implemented via a hook that recomputes the weight\n tensor from the magnitude and direction before every :meth:`~Module.forward`\n call.\n By default, with ``dim=0``, the norm is computed independently per output\n channel/plane. To compute a norm over the entire weight tensor, use\n ``dim=None``.\n See https://arxiv.org/abs/1602.07868\n Args:\n module (Module): containing module\n name (str, optional): name of weight parameter\n dim (int, optional): dimension over which to compute the norm\n Returns:\n The original module with the weight norm hook\n Example::\n >>> m = weight_norm(nn.Linear(20, 40), name='weight')\n >>> m\n Linear(in_features=20, out_features=40, bias=True)\n >>> m.weight_g.size()\n torch.Size([40, 1])\n >>> m.weight_v.size()\n torch.Size([40, 20])\n \"\"\"\n WeightScale.apply(module, name, dim)\n return module\n\n\ndef remove_weight_scale(module, name='weight'):\n r\"\"\"Removes the weight normalization reparameterization from a module.\n Args:\n module (Module): containing module\n name (str, optional): name of weight parameter\n Example:\n >>> m = weight_norm(nn.Linear(20, 40))\n >>> remove_weight_norm(m)\n \"\"\"\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, WeightScale) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"weight_norm of '{}' not found in {}\"\n .format(name, module))" ]
[ [ "torch.nn.parameter.Parameter" ] ]
OpheliaLjh/analytics-zoo
[ "bb3aa8cf8d109df960b352b1bdee23ef98c2a25a" ]
[ "pyzoo/test/zoo/orca/automl/autoestimator/test_autoestimator_keras.py" ]
[ "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport tensorflow as tf\nimport numpy as np\nfrom unittest import TestCase\nfrom zoo.orca.automl.auto_estimator import AutoEstimator\nfrom zoo.automl.recipe.base import Recipe\nimport pytest\n\n\ndef model_creator(config):\n model = tf.keras.models.Sequential([tf.keras.layers.Dense(config[\"hidden_size\"],\n input_shape=(1,)),\n tf.keras.layers.Dense(1)])\n model.compile(loss=\"mse\",\n optimizer=tf.keras.optimizers.SGD(config[\"lr\"]),\n metrics=[\"mse\"])\n return model\n\n\ndef get_train_val_data():\n def get_x_y(size):\n x = np.random.rand(size)\n y = x / 2\n\n x = x.reshape((-1, 1))\n y = y.reshape((-1, 1))\n return x, y\n train_x, train_y = get_x_y(size=1000)\n val_x, val_y = get_x_y(size=400)\n data = {'x': train_x, 'y': train_y, 'val_x': val_x, 'val_y': val_y}\n return data\n\n\nclass LinearRecipe(Recipe):\n def search_space(self, all_available_features):\n from zoo.orca.automl import hp\n return {\n \"hidden_size\": hp.choice([5, 10]),\n \"lr\": hp.choice([0.001, 0.003, 0.01]),\n \"batch_size\": hp.choice([32, 64])\n }\n\n def runtime_params(self):\n return {\n \"training_iteration\": 1,\n \"num_samples\": 4\n }\n\n\nclass TestTFKerasAutoEstimator(TestCase):\n def setUp(self) -> None:\n from zoo.orca import init_orca_context\n init_orca_context(cores=8, init_ray_on_spark=True)\n\n def tearDown(self) -> None:\n from zoo.orca import stop_orca_context\n stop_orca_context()\n\n def test_fit(self):\n auto_est = AutoEstimator.from_keras(model_creator=model_creator,\n logs_dir=\"/tmp/zoo_automl_logs\",\n resources_per_trial={\"cpu\": 2},\n name=\"test_fit\")\n data = get_train_val_data()\n auto_est.fit(data,\n recipe=LinearRecipe(),\n metric=\"mse\")\n best_model = auto_est.get_best_model()\n assert \"hidden_size\" in best_model.config\n\n def test_fit_multiple_times(self):\n auto_est = AutoEstimator.from_keras(model_creator=model_creator,\n logs_dir=\"/tmp/zoo_automl_logs\",\n resources_per_trial={\"cpu\": 2},\n name=\"test_fit\")\n data = get_train_val_data()\n auto_est.fit(data,\n recipe=LinearRecipe(),\n metric=\"mse\")\n with pytest.raises(RuntimeError):\n auto_est.fit(data,\n recipe=LinearRecipe(),\n metric=\"mse\")\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n" ]
[ [ "tensorflow.keras.optimizers.SGD", "numpy.random.rand", "tensorflow.keras.layers.Dense" ] ]
prisae/empymod
[ "c01eae0ac51b37864c0b68bf0c207c1bd7c7e585" ]
[ "examples/educational/random_noise_f_domain.py" ]
[ "r\"\"\"\nAdding random noise to frequency-domain CSEM data\n=================================================\n\nAdding random noise to frequency-domain CSEM data is not a trivial task, and\nthere are many different ways how it can be done. The problem comes from the\nfact that we live in the time domain, we do our measurements in the time\ndomain, our noise is therefore time-domain noise, but we want to add\ncomplex-valued noise in the frequency domain.\n\nHere we are going to look at some possibilities. However, keep in mind that\nthere are more possibilities than the ones shown here.\n\n1. Theory\n---------\n\nLet's assume we have complex-valued data :math:`d=x+\\text{i}y`. We can add\nrandom noise to the data in the following way,\n\n.. math::\n :label: generalnoise\n\n \\tilde{d} = d + \\sigma \\left[(1 + \\text{i})\\,\\mu + \\mathcal{R} \\right] \\, ,\n\nwhere :math:`\\tilde{d}` is the data with added noise, :math:`\\sigma` is the\nstandard deviation, :math:`\\mu` is the mean value of the randomly distributed\nnoise, and :math:`\\mathcal{R}` is the random noise. We define the standard\ndeviation as\n\n.. math::\n :label: stdev\n\n \\sigma = \\sqrt{\\epsilon_\\text{n}^2 + \\left(\\epsilon_\\text{r}|d|\\right)^2 }\n \\, ,\n\nwhere :math:`\\epsilon_\\text{n}` is the noise floor and\n:math:`\\epsilon_\\text{r}` is the relative error.\n\nWe compare here three ways of computing the random noise :math:`\\mathcal{R}`.\nOf course there are other possibilities, e.g., one could make the non-zero mean\na random realization itself.\n\n\n1. Adding random uniform phases but constant amplitude\n\n .. math::\n :label: uniform\n\n \\mathcal{R}_\\text{wn} = \\exp[\\text{i}\\,\\mathcal{U}(0, 2\\pi)] \\, ,\n\n where :math:`\\mathcal{U}(0, 2\\pi)` is the uniform distribution and its\n range. This adds white noise with a flat amplitude spectrum and random\n phases.\n\n\n2. Adding Gaussian noise to real and imaginary parts\n\n - Adding correlated random Gaussian noise\n\n .. math::\n :label: cgaussian\n\n \\mathcal{R}_\\text{gc} = (1+\\text{i})\\,\\mathcal{N}(0, 1) \\, ,\n\n where :math:`\\mathcal{N}(0, 1)` is the standard normal distribution of\n zero mean and unit standard deviation.\n\n - Adding uncorrelated random Gaussian noise\n\n Above is the correlated version. Noise could also be added completely\n uncorrelated,\n\n .. 
math::\n :label: ugaussian\n\n \\mathcal{R}_\\text{gu} =\n \\mathcal{N}(0, 1) + \\text{i}\\,\\mathcal{N}(0, 1) \\, .\n\n\"\"\"\n\nimport empymod\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\n\n\n###############################################################################\n# Noise computation\n# ~~~~~~~~~~~~~~~~~\n\n# Initiate random number generator.\nrng = np.random.default_rng()\n\n\ndef add_noise(data, ntype, rel_error, noise_floor, mu):\n \"\"\"Add random noise to complex-valued data.\n\n If `ntype='white_noise'`, complex noise is generated from uniform randomly\n distributed phases.\n\n If `ntype='gaussian_correlated'`, correlated Gaussian random noise is added\n to real and imaginary part.\n\n If `ntype='gaussian_uncorrelated'`, uncorrelated Gaussian random noise is\n added to real and imaginary part.\n\n \"\"\"\n\n # Standard deviation\n std = np.sqrt(noise_floor**2 + (rel_error*abs(data))**2)\n\n # Random noise\n if ntype == 'gaussian_correlated':\n noise = rng.standard_normal(data.size)*(1+1j)\n elif ntype == 'gaussian_uncorrelated':\n noise = 1j*rng.standard_normal(data.size)\n noise += rng.standard_normal(data.size)\n else:\n noise = np.exp(1j * rng.uniform(0, 2*np.pi, data.size))\n\n # Scale and move noise; add to data and return\n return data + std*((1+1j)*mu + noise)\n\n\ndef stack(n, data, ntype, **kwargs):\n \"\"\"Stack n-times the noise, return normalized.\"\"\"\n out = add_noise(data, ntype, **kwargs)/n\n for _ in range(n-1):\n out += add_noise(data, ntype, **kwargs)/n\n return out\n\n\n###############################################################################\n# 2. Graphical illustration\n# -------------------------\n#\n# The following is a graphical illustration. Please note that the relative\n# error is **very** high (20%)! 
This is only for illustration purposes.\n\n# Inputs\nd = np.array([6+2j]) # observed data point\nmean = 0.3 # Non-zero mean\nrelative_error = 0.2 # Very high relative error\nstd = relative_error*abs(d) # std (without noise floor)\n\n# Create figure\nfig, axs = plt.subplots(2, 1, figsize=(8, 10), sharex=True, sharey=True)\nax1, ax2 = axs\n\n# Titles\nfig.suptitle(r\"Random noise with $\\epsilon_n = 0, \"\n f\"\\\\epsilon_r={relative_error}, \\\\mu={mean}$\", y=1, fontsize=20)\nax1.set_title('Theoretical distributions')\nax2.set_title('Random realizations')\n\n# Plot data point\nfor ax in axs:\n ax.plot(np.r_[0., d.real], np.r_[0., d.imag], '--', c='.5')\n ax.plot(d.real, d.imag, 'ko', ms=10, label='$d^{obs}$', zorder=10)\n\n\n# Mean and standard deviation\nax1.plot(d.real+np.r_[0, std*mean], d.imag+np.r_[0, std*mean],\n 'C8', label=r'Scaled mean $\\sigma (1+i)\\mu$', zorder=9)\nax1.plot(d.real+np.r_[std*mean, std*(1+mean)],\n d.imag+np.r_[std*mean, std*mean],\n 'C1', label=r'Standard deviation $\\sigma$')\n\n\n# Random uniform phase\nuniform_mean = std * ((1+1j)*mean + np.exp(1j*np.linspace(0, 2*np.pi, 301)))\nax1.plot(d.real+uniform_mean.real, d.imag+uniform_mean.imag,\n 'C0', label='Random uniform phase')\n\n\n# Gaussian\nfor i in range(1, 3):\n # Correlated\n ax1.plot(d.real + np.r_[-std, +std]*i + std*mean,\n d.imag + np.r_[-std, +std]*i + std*mean,\n 'C3-', lw=6-2*i,\n label=f'Gaussian $\\\\pm {i} \\\\sigma$, correlated')\n\n # Uncorrelated\n ax1.plot(d.real + np.r_[-std, -std, +std, +std, -std]*i + std*mean,\n d.imag + np.r_[-std, +std, +std, -std, -std]*i + std*mean,\n 'C2:', lw=6-2*i,\n label=f'Gaussian $\\\\pm {i} \\\\sigma$, uncorrelated')\n\n\n# Plot random realizations\ndata = np.ones(300, dtype=complex)*d\nshape = data.shape\nrng = np.random.default_rng()\nls = ['C0x', 'C3+', 'C2x']\nntypes = ['white_noise', 'gaussian_correlated', 'gaussian_uncorrelated']\nfor i, ntype in enumerate(ntypes):\n\n # Add random noise of ntype.\n ndata = add_noise(data, ntype, relative_error, 0.0, mean)\n ax2.plot(ndata.real, ndata.imag, ls[i], label=ntype)\n\n\n# Axis etc\nfor ax in axs:\n ax.axhline(c='k')\n ax.axvline(c='k')\n ax.legend(framealpha=1, loc='upper left')\n ax.set_aspect('equal')\n ax.set_ylabel('Imaginary part')\n ax.set_xlim([-4, 10])\nax2.set_xlabel('Real part')\n\n# fig.tight_layout()\nfig.show()\n\n###############################################################################\n#\n# Intuitively one might think that the Gaussian uncorrelated noise is the\n# \"best\" one, as it looks truly random. However, it is arguably the least\n# \"physical\" one, as real and imaginary part of the electromagnetic field are\n# not independent - if one changes, the other changes too. The uniformly\n# distributed phases (blue circle) is the most physical noise corresponding to\n# white noise adding random phases with a constant amplitude.\n#\n# To get a better understanding we look at some numerical examples where we\n# plot amplitude-vs-offset for a fixed frequency, and amplitude-vs-frequency\n# for a fixed offset; for single realizations and when we stack it many times\n# in order to reduce the noise.\n#\n# 3. 
Numerical examples\n# ---------------------\n#\n# Model\n# ~~~~~\n\n# Model parameters\nmodel = {\n 'src': (0, 0, 0), # Source at origin\n 'depth': [], # Homogenous space\n 'res': 3, # 3 Ohm.m\n 'ab': 11, # Ex-source, Ex-receiver}\n}\n\n# Single offset and offsets\noffs = np.linspace(1000, 15000, 201)\noff = 5000\n\n# Single frequency and frequencies\nfreqs = np.logspace(-3, 2, 201)\nfreq = 1\n\n# Responses\noresp = empymod.dipole(\n rec=(offs, offs*0, 0), # Inline receivers\n freqtime=freq,\n **model\n)\nfresp = empymod.dipole(\n rec=(5000, 0, 0), # Inline receiver\n freqtime=freqs,\n **model,\n)\n\n# Relative error, noise floor, mean of noise\nrel_error = 0.05\nnoise_floor = 1e-15\nn_stack = 1000\n\n# Phase settings: wrapped, radians, lag-defined (+iw)\nphase = {'unwrap': False, 'deg': False, 'lag': True}\n\n\n###############################################################################\n# Plotting function\n# ~~~~~~~~~~~~~~~~~\n\ndef error(resp, noise):\n \"\"\"Return relative error (%) of noise with respect to resp.\"\"\"\n return 100*abs((noise-resp)/resp)\n\n\ndef figure(x, data, reim, comp):\n fig, axs = plt.subplots(2, 4, constrained_layout=True,\n figsize=(14, 8), sharex=True)\n\n axs[0, 0].set_title('|Real| (V/m)')\n axs[0, 0].plot(x, abs(data.real), 'k')\n axs[0, 0].plot(x, abs(reim.real), 'C0')\n axs[0, 0].plot(x, abs(comp.real), 'C1--')\n axs[0, 0].set_yscale('log')\n\n axs[1, 0].plot(x, error(data.real, reim.real), 'C0')\n axs[1, 0].plot(x, error(data.real, comp.real), 'C1--')\n axs[1, 0].set_ylabel('Rel. Error (%)')\n\n axs[0, 1].set_title('|Imaginary| (V/m)')\n axs[0, 1].plot(x, abs(data.imag), 'k', label='Data')\n axs[0, 1].plot(x, abs(reim.imag), 'C0', label='Noise to Re; Im')\n axs[0, 1].plot(x, abs(comp.imag), 'C1--', label='Noise to Complex')\n axs[0, 1].set_yscale('log')\n axs[0, 1].legend(fontsize=12, framealpha=1)\n\n axs[1, 1].plot(x, error(data.imag, reim.imag), 'C0')\n axs[1, 1].plot(x, error(data.imag, comp.imag), 'C1--')\n\n axs[0, 2].set_title('Amplitude (V/m)')\n axs[0, 2].plot(x, data.amp(), 'k')\n axs[0, 2].plot(x, reim.amp(), 'C0')\n axs[0, 2].plot(x, comp.amp(), 'C1--')\n axs[0, 2].set_yscale('log')\n\n axs[1, 2].plot(x, error(data.amp(), reim.amp()), 'C0')\n axs[1, 2].plot(x, error(data.amp(), comp.amp()), 'C1--')\n\n axs[0, 3].set_title('Phase (rad)')\n axs[0, 3].plot(x, data.pha(**phase), 'k')\n axs[0, 3].plot(x, reim.pha(**phase), 'C0')\n axs[0, 3].plot(x, comp.pha(**phase), 'C1--')\n\n axs[1, 3].plot(x, error(data.pha(**phase), reim.pha(**phase)), 'C0')\n axs[1, 3].plot(x, error(data.pha(**phase), comp.pha(**phase)), 'C1--')\n\n return fig, axs\n\n\n###############################################################################\n# 3.1 Offset-range for single frequency\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef offset_single(mu):\n \"\"\"Single frequency, many offsets, one realization.\"\"\"\n # Add noise\n inp = {'rel_error': rel_error, 'noise_floor': noise_floor, 'mu': mu}\n onoise_reim = add_noise(oresp, 'gaussian_correlated', **inp)\n onoise_comp = add_noise(oresp, 'white_noise', **inp)\n\n fig, axs = figure(offs, oresp, onoise_reim, onoise_comp)\n fig.suptitle(f\"Inline $E_{{xx}}$; $s_z=r_z=0$; $f=${freq} Hz; \"\n f\"fullspace of {model['res']} $\\\\Omega\\\\,$m; $\\\\mu=${mu}\",\n fontsize=20)\n\n for i in range(3):\n axs[0, i].set_ylim([1e-19, 3e-10])\n\n for i in range(4):\n axs[1, i].set_xlabel('Offset (m)')\n axs[1, i].set_yscale('log')\n axs[1, i].set_ylim([1e-2, 
1e6])\n\n\n###############################################################################\n\noffset_single(mu=0.0)\n\n###############################################################################\n\noffset_single(mu=0.5)\n\n\n###############################################################################\n# 3.2 Offset-range for single frequency - STACKED\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\ndef offset_stacked(mu):\n \"\"\"Single frequency, many offsets, stacked.\"\"\"\n # Stack noise\n inp = {'rel_error': rel_error, 'noise_floor': noise_floor, 'mu': mu}\n sonoise_reim = stack(n_stack, oresp, 'gaussian_correlated', **inp)\n sonoise_comp = stack(n_stack, oresp, 'white_noise', **inp)\n\n fig, axs = figure(offs, oresp, sonoise_reim, sonoise_comp)\n fig.suptitle(f\"STACKED {n_stack} times; $\\\\mu=${mu}\", fontsize=20)\n\n for i in range(3):\n axs[0, i].set_ylim([1e-19, 3e-10])\n\n for i in range(4):\n axs[1, i].set_xlabel('Offset (m)')\n axs[1, i].set_ylim([-5, 40])\n\n\n###############################################################################\n\noffset_stacked(mu=0.0)\n\n###############################################################################\n\noffset_stacked(mu=0.5)\n\n\n###############################################################################\n# 3.3 Frequency-range for single offset\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef frequency_single(mu):\n \"\"\"Single offset, many frequencies, one realization.\"\"\"\n # Add noise\n inp = {'rel_error': rel_error, 'noise_floor': noise_floor, 'mu': mu}\n fnoise_reim = add_noise(fresp, 'gaussian_correlated', **inp)\n fnoise_comp = add_noise(fresp, 'white_noise', **inp)\n\n fig, axs = figure(freqs, fresp, fnoise_reim, fnoise_comp)\n fig.suptitle(f\"Inline $E_{{xx}}$; $s_z=r_z=0$; offset$=${off/1e3} km; \"\n f\"fullspace of {model['res']} $\\\\Omega\\\\,$m; $\\\\mu=${mu}\",\n fontsize=20)\n\n for i in range(3):\n axs[0, i].set_ylim([1e-18, 1e-11])\n\n for i in range(4):\n axs[0, i].set_xscale('log')\n axs[1, i].set_xlabel('Frequency (Hz)')\n axs[1, i].set_yscale('log')\n axs[1, i].set_ylim([1e-1, 1e5])\n\n\n###############################################################################\n\nfrequency_single(mu=0.0)\n\n###############################################################################\n\nfrequency_single(mu=0.5)\n\n\n###############################################################################\n# 3.4 Frequency-range for single offset - STACKED\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef frequency_stacked(mu):\n \"\"\"Single offset, many frequencies, stacked.\"\"\"\n # Stack noise\n inp = {'rel_error': rel_error, 'noise_floor': noise_floor, 'mu': mu}\n sfnoise_reim = stack(n_stack, fresp, 'gaussian_correlated', **inp)\n sfnoise_comp = stack(n_stack, fresp, 'white_noise', **inp)\n\n fig, axs = figure(freqs, fresp, sfnoise_reim, sfnoise_comp)\n fig.suptitle(f\"STACKED {n_stack} times; $\\\\mu=${mu}\", fontsize=20)\n\n for i in range(3):\n axs[0, i].set_ylim([1e-18, 3e-11])\n\n for i in range(4):\n axs[0, i].set_xscale('log')\n axs[1, i].set_xlabel('Frequency (Hz)')\n axs[1, i].set_ylim([-5, 40])\n\n\n###############################################################################\n\nfrequency_stacked(mu=0.0)\n\n###############################################################################\n\nfrequency_stacked(mu=0.5)\n\n\n###############################################################################\n\nempymod.Report()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.random.default_rng", "matplotlib.pyplot.subplots", "matplotlib.pyplot.style.use", "numpy.linspace", "numpy.logspace" ] ]
arsen-sheverdin/CloakingNet
[ "5c2b882dcff3c6347eeacb53270690fb6fefa301" ]
[ "model.py" ]
[ "\nimport torch\nfrom torch import nn\nclass NN(nn.Module):\n def __init__(self, layer_size ):\n super(NN, self).__init__()\n self.main = nn.Sequential(\n# nn.BatchNorm1d(151),\n nn.Linear(151,layer_size),\n nn.LeakyReLU(0.3),\n# nn.BatchNorm1d(self.fc_size),\n# nn.Dropout(p=0.5),\n nn.Linear(layer_size,layer_size),\n nn.LeakyReLU(0.3),\n# nn.BatchNorm1d(self.fc_size),\n# nn.Dropout(p=0.5),\n nn.Linear(layer_size,layer_size),\n nn.LeakyReLU(0.3),\n# nn.Dropout(p=0.5),\n# nn.BatchNorm1d(self.fc_size),\n nn.Linear(layer_size,5),\n# nn.LeakyReLU(0.3),\n# nn.BatchNorm1d(5),\n )\n \n def forward(self, input):\n return self.main(input)\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform(m.weight)\n m.bias.data.fill_(0.01)\n if type(m) == nn.Conv2d:\n torch.nn.init.xavier_uniform(m.weight)\n m.bias.data.fill_(0.01)\n\ndef model_complexity(model):\n pytorch_total_params = sum(p.numel() for p in model.parameters())\n print('Total: \\t\\t', pytorch_total_params/10**6)\n pytorch_train_params = sum(p.numel() for p in model.parameters() if p.requires_grad==True)\n print('Trainable:\\t\\t', pytorch_train_params/10**6)\n return (pytorch_train_params/10**6)" ]
[ [ "torch.nn.init.xavier_uniform", "torch.nn.LeakyReLU", "torch.nn.Linear" ] ]
etspaceman/spark
[ "155a67d00cb2f12aad179f6df2d992feca8e003e" ]
[ "python/pyspark/sql/dataframe.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport random\n\nif sys.version >= '3':\n basestring = unicode = str\n long = int\n from functools import reduce\n from html import escape as html_escape\nelse:\n from itertools import imap as map\n from cgi import escape as html_escape\n\nimport warnings\n\nfrom pyspark import copy_func, since, _NoValue\nfrom pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket, \\\n ignore_unicode_prefix, PythonEvalType\nfrom pyspark.serializers import ArrowCollectSerializer, BatchedSerializer, PickleSerializer, \\\n UTF8Deserializer\nfrom pyspark.storagelevel import StorageLevel\nfrom pyspark.traceback_utils import SCCallSiteSync\nfrom pyspark.sql.types import _parse_datatype_json_string\nfrom pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column\nfrom pyspark.sql.readwriter import DataFrameWriter\nfrom pyspark.sql.streaming import DataStreamWriter\nfrom pyspark.sql.types import IntegralType\nfrom pyspark.sql.types import *\nfrom pyspark.util import _exception_message\n\n__all__ = [\"DataFrame\", \"DataFrameNaFunctions\", \"DataFrameStatFunctions\"]\n\n\nclass DataFrame(object):\n \"\"\"A distributed collection of data grouped into named columns.\n\n A :class:`DataFrame` is equivalent to a relational table in Spark SQL,\n and can be created using various functions in :class:`SparkSession`::\n\n people = spark.read.parquet(\"...\")\n\n Once created, it can be manipulated using the various domain-specific-language\n (DSL) functions defined in: :class:`DataFrame`, :class:`Column`.\n\n To select a column from the data frame, use the apply method::\n\n ageCol = people.age\n\n A more concrete example::\n\n # To create DataFrame using SparkSession\n people = spark.read.parquet(\"...\")\n department = spark.read.parquet(\"...\")\n\n people.filter(people.age > 30).join(department, people.deptId == department.id) \\\\\n .groupBy(department.name, \"gender\").agg({\"salary\": \"avg\", \"age\": \"max\"})\n\n .. 
versionadded:: 1.3\n \"\"\"\n\n def __init__(self, jdf, sql_ctx):\n self._jdf = jdf\n self.sql_ctx = sql_ctx\n self._sc = sql_ctx and sql_ctx._sc\n self.is_cached = False\n self._schema = None # initialized lazily\n self._lazy_rdd = None\n # Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice\n # by __repr__ and _repr_html_ while eager evaluation opened.\n self._support_repr_html = False\n\n @property\n @since(1.3)\n def rdd(self):\n \"\"\"Returns the content as an :class:`pyspark.RDD` of :class:`Row`.\n \"\"\"\n if self._lazy_rdd is None:\n jrdd = self._jdf.javaToPython()\n self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))\n return self._lazy_rdd\n\n @property\n @since(\"1.3.1\")\n def na(self):\n \"\"\"Returns a :class:`DataFrameNaFunctions` for handling missing values.\n \"\"\"\n return DataFrameNaFunctions(self)\n\n @property\n @since(1.4)\n def stat(self):\n \"\"\"Returns a :class:`DataFrameStatFunctions` for statistic functions.\n \"\"\"\n return DataFrameStatFunctions(self)\n\n @ignore_unicode_prefix\n @since(1.3)\n def toJSON(self, use_unicode=True):\n \"\"\"Converts a :class:`DataFrame` into a :class:`RDD` of string.\n\n Each row is turned into a JSON document as one element in the returned RDD.\n\n >>> df.toJSON().first()\n u'{\"age\":2,\"name\":\"Alice\"}'\n \"\"\"\n rdd = self._jdf.toJSON()\n return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))\n\n @since(2.0)\n def createTempView(self, name):\n \"\"\"Creates a local temporary view with this DataFrame.\n\n The lifetime of this temporary table is tied to the :class:`SparkSession`\n that was used to create this :class:`DataFrame`.\n throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the\n catalog.\n\n >>> df.createTempView(\"people\")\n >>> df2 = spark.sql(\"select * from people\")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n >>> df.createTempView(\"people\") # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n AnalysisException: u\"Temporary table 'people' already exists;\"\n >>> spark.catalog.dropTempView(\"people\")\n\n \"\"\"\n self._jdf.createTempView(name)\n\n @since(2.0)\n def createOrReplaceTempView(self, name):\n \"\"\"Creates or replaces a local temporary view with this DataFrame.\n\n The lifetime of this temporary table is tied to the :class:`SparkSession`\n that was used to create this :class:`DataFrame`.\n\n >>> df.createOrReplaceTempView(\"people\")\n >>> df2 = df.filter(df.age > 3)\n >>> df2.createOrReplaceTempView(\"people\")\n >>> df3 = spark.sql(\"select * from people\")\n >>> sorted(df3.collect()) == sorted(df2.collect())\n True\n >>> spark.catalog.dropTempView(\"people\")\n\n \"\"\"\n self._jdf.createOrReplaceTempView(name)\n\n @since(2.1)\n def createGlobalTempView(self, name):\n \"\"\"Creates a global temporary view with this DataFrame.\n\n The lifetime of this temporary view is tied to this Spark application.\n throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the\n catalog.\n\n >>> df.createGlobalTempView(\"people\")\n >>> df2 = spark.sql(\"select * from global_temp.people\")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n >>> df.createGlobalTempView(\"people\") # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n AnalysisException: u\"Temporary table 'people' already exists;\"\n >>> spark.catalog.dropGlobalTempView(\"people\")\n\n \"\"\"\n 
self._jdf.createGlobalTempView(name)\n\n @since(2.2)\n def createOrReplaceGlobalTempView(self, name):\n \"\"\"Creates or replaces a global temporary view using the given name.\n\n The lifetime of this temporary view is tied to this Spark application.\n\n >>> df.createOrReplaceGlobalTempView(\"people\")\n >>> df2 = df.filter(df.age > 3)\n >>> df2.createOrReplaceGlobalTempView(\"people\")\n >>> df3 = spark.sql(\"select * from global_temp.people\")\n >>> sorted(df3.collect()) == sorted(df2.collect())\n True\n >>> spark.catalog.dropGlobalTempView(\"people\")\n\n \"\"\"\n self._jdf.createOrReplaceGlobalTempView(name)\n\n @property\n @since(1.4)\n def write(self):\n \"\"\"\n Interface for saving the content of the non-streaming :class:`DataFrame` out into external\n storage.\n\n :return: :class:`DataFrameWriter`\n \"\"\"\n return DataFrameWriter(self)\n\n @property\n @since(2.0)\n def writeStream(self):\n \"\"\"\n Interface for saving the content of the streaming :class:`DataFrame` out into external\n storage.\n\n .. note:: Evolving.\n\n :return: :class:`DataStreamWriter`\n \"\"\"\n return DataStreamWriter(self)\n\n @property\n @since(1.3)\n def schema(self):\n \"\"\"Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.\n\n >>> df.schema\n StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))\n \"\"\"\n if self._schema is None:\n try:\n self._schema = _parse_datatype_json_string(self._jdf.schema().json())\n except AttributeError as e:\n raise Exception(\n \"Unable to parse datatype from schema. %s\" % e)\n return self._schema\n\n @since(1.3)\n def printSchema(self):\n \"\"\"Prints out the schema in the tree format.\n\n >>> df.printSchema()\n root\n |-- age: integer (nullable = true)\n |-- name: string (nullable = true)\n <BLANKLINE>\n \"\"\"\n print(self._jdf.schema().treeString())\n\n @since(1.3)\n def explain(self, extended=False):\n \"\"\"Prints the (logical and physical) plans to the console for debugging purpose.\n\n :param extended: boolean, default ``False``. If ``False``, prints only the physical plan.\n\n >>> df.explain()\n == Physical Plan ==\n *(1) Scan ExistingRDD[age#0,name#1]\n\n >>> df.explain(True)\n == Parsed Logical Plan ==\n ...\n == Analyzed Logical Plan ==\n ...\n == Optimized Logical Plan ==\n ...\n == Physical Plan ==\n ...\n \"\"\"\n if extended:\n print(self._jdf.queryExecution().toString())\n else:\n print(self._jdf.queryExecution().simpleString())\n\n @since(2.4)\n def exceptAll(self, other):\n \"\"\"Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but\n not in another :class:`DataFrame` while preserving duplicates.\n\n This is equivalent to `EXCEPT ALL` in SQL.\n\n >>> df1 = spark.createDataFrame(\n ... 
[(\"a\", 1), (\"a\", 1), (\"a\", 1), (\"a\", 2), (\"b\", 3), (\"c\", 4)], [\"C1\", \"C2\"])\n >>> df2 = spark.createDataFrame([(\"a\", 1), (\"b\", 3)], [\"C1\", \"C2\"])\n\n >>> df1.exceptAll(df2).show()\n +---+---+\n | C1| C2|\n +---+---+\n | a| 1|\n | a| 1|\n | a| 2|\n | c| 4|\n +---+---+\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n \"\"\"\n return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def isLocal(self):\n \"\"\"Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally\n (without any Spark executors).\n \"\"\"\n return self._jdf.isLocal()\n\n @property\n @since(2.0)\n def isStreaming(self):\n \"\"\"Returns true if this :class:`Dataset` contains one or more sources that continuously\n return data as it arrives. A :class:`Dataset` that reads data from a streaming source\n must be executed as a :class:`StreamingQuery` using the :func:`start` method in\n :class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or\n :func:`collect`) will throw an :class:`AnalysisException` when there is a streaming\n source present.\n\n .. note:: Evolving\n \"\"\"\n return self._jdf.isStreaming()\n\n @since(1.3)\n def show(self, n=20, truncate=True, vertical=False):\n \"\"\"Prints the first ``n`` rows to the console.\n\n :param n: Number of rows to show.\n :param truncate: If set to True, truncate strings longer than 20 chars by default.\n If set to a number greater than one, truncates long strings to length ``truncate``\n and align cells right.\n :param vertical: If set to True, print output rows vertically (one line\n per column value).\n\n >>> df\n DataFrame[age: int, name: string]\n >>> df.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n >>> df.show(truncate=3)\n +---+----+\n |age|name|\n +---+----+\n | 2| Ali|\n | 5| Bob|\n +---+----+\n >>> df.show(vertical=True)\n -RECORD 0-----\n age | 2\n name | Alice\n -RECORD 1-----\n age | 5\n name | Bob\n \"\"\"\n if isinstance(truncate, bool) and truncate:\n print(self._jdf.showString(n, 20, vertical))\n else:\n print(self._jdf.showString(n, int(truncate), vertical))\n\n def __repr__(self):\n if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():\n vertical = False\n return self._jdf.showString(\n self.sql_ctx._conf.replEagerEvalMaxNumRows(),\n self.sql_ctx._conf.replEagerEvalTruncate(), vertical)\n else:\n return \"DataFrame[%s]\" % (\", \".join(\"%s: %s\" % c for c in self.dtypes))\n\n def _repr_html_(self):\n \"\"\"Returns a dataframe with html code when you enabled eager evaluation\n by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are\n using support eager evaluation with HTML.\n \"\"\"\n if not self._support_repr_html:\n self._support_repr_html = True\n if self.sql_ctx._conf.isReplEagerEvalEnabled():\n max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)\n sock_info = self._jdf.getRowsToPython(\n max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())\n rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))\n head = rows[0]\n row_data = rows[1:]\n has_more_data = len(row_data) > max_num_rows\n row_data = row_data[:max_num_rows]\n\n html = \"<table border='1'>\\n\"\n # generate table head\n html += \"<tr><th>%s</th></tr>\\n\" % \"</th><th>\".join(map(lambda x: html_escape(x), head))\n # generate table rows\n for row in row_data:\n html += \"<tr><td>%s</td></tr>\\n\" % \"</td><td>\".join(\n map(lambda 
x: html_escape(x), row))\n html += \"</table>\\n\"\n if has_more_data:\n html += \"only showing top %d %s\\n\" % (\n max_num_rows, \"row\" if max_num_rows == 1 else \"rows\")\n return html\n else:\n return None\n\n @since(2.1)\n def checkpoint(self, eager=True):\n \"\"\"Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the\n logical plan of this DataFrame, which is especially useful in iterative algorithms where the\n plan may grow exponentially. It will be saved to files inside the checkpoint\n directory set with :meth:`SparkContext.setCheckpointDir`.\n\n :param eager: Whether to checkpoint this DataFrame immediately\n\n .. note:: Experimental\n \"\"\"\n jdf = self._jdf.checkpoint(eager)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(2.3)\n def localCheckpoint(self, eager=True):\n \"\"\"Returns a locally checkpointed version of this Dataset. Checkpointing can be used to\n truncate the logical plan of this DataFrame, which is especially useful in iterative\n algorithms where the plan may grow exponentially. Local checkpoints are stored in the\n executors using the caching subsystem and therefore they are not reliable.\n\n :param eager: Whether to checkpoint this DataFrame immediately\n\n .. note:: Experimental\n \"\"\"\n jdf = self._jdf.localCheckpoint(eager)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(2.1)\n def withWatermark(self, eventTime, delayThreshold):\n \"\"\"Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point\n in time before which we assume no more late data is going to arrive.\n\n Spark will use this watermark for several purposes:\n - To know when a given time window aggregation can be finalized and thus can be emitted\n when using output modes that do not allow updates.\n\n - To minimize the amount of state that we need to keep for on-going aggregations.\n\n The current watermark is computed by looking at the `MAX(eventTime)` seen across\n all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost\n of coordinating this value across partitions, the actual watermark used is only guaranteed\n to be at least `delayThreshold` behind the actual event time. In some cases we may still\n process records that arrive more than `delayThreshold` late.\n\n :param eventTime: the name of the column that contains the event time of the row.\n :param delayThreshold: the minimum delay to wait to data to arrive late, relative to the\n latest record that has been processed in the form of an interval\n (e.g. \"1 minute\" or \"5 hours\").\n\n .. 
note:: Evolving\n\n >>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')\n DataFrame[name: string, time: timestamp]\n \"\"\"\n if not eventTime or type(eventTime) is not str:\n raise TypeError(\"eventTime should be provided as a string\")\n if not delayThreshold or type(delayThreshold) is not str:\n raise TypeError(\"delayThreshold should be provided as a string interval\")\n jdf = self._jdf.withWatermark(eventTime, delayThreshold)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(2.2)\n def hint(self, name, *parameters):\n \"\"\"Specifies some hint on the current DataFrame.\n\n :param name: A name of the hint.\n :param parameters: Optional parameters.\n :return: :class:`DataFrame`\n\n >>> df.join(df2.hint(\"broadcast\"), \"name\").show()\n +----+---+------+\n |name|age|height|\n +----+---+------+\n | Bob| 5| 85|\n +----+---+------+\n \"\"\"\n if len(parameters) == 1 and isinstance(parameters[0], list):\n parameters = parameters[0]\n\n if not isinstance(name, str):\n raise TypeError(\"name should be provided as str, got {0}\".format(type(name)))\n\n allowed_types = (basestring, list, float, int)\n for p in parameters:\n if not isinstance(p, allowed_types):\n raise TypeError(\n \"all parameters should be in {0}, got {1} of type {2}\".format(\n allowed_types, p, type(p)))\n\n jdf = self._jdf.hint(name, self._jseq(parameters))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.3)\n def count(self):\n \"\"\"Returns the number of rows in this :class:`DataFrame`.\n\n >>> df.count()\n 2\n \"\"\"\n return int(self._jdf.count())\n\n @ignore_unicode_prefix\n @since(1.3)\n def collect(self):\n \"\"\"Returns all the records as a list of :class:`Row`.\n\n >>> df.collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n sock_info = self._jdf.collectToPython()\n return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))\n\n @ignore_unicode_prefix\n @since(2.0)\n def toLocalIterator(self, prefetchPartitions=False):\n \"\"\"\n Returns an iterator that contains all of the rows in this :class:`DataFrame`.\n The iterator will consume as much memory as the largest partition in this DataFrame.\n With prefetch it may consume up to the memory of the 2 largest partitions.\n\n :param prefetchPartitions: If Spark should pre-fetch the next partition\n before it is needed.\n\n >>> list(df.toLocalIterator())\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n sock_info = self._jdf.toPythonIterator(prefetchPartitions)\n return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))\n\n @ignore_unicode_prefix\n @since(1.3)\n def limit(self, num):\n \"\"\"Limits the result count to the number specified.\n\n >>> df.limit(1).collect()\n [Row(age=2, name=u'Alice')]\n >>> df.limit(0).collect()\n []\n \"\"\"\n jdf = self._jdf.limit(num)\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def take(self, num):\n \"\"\"Returns the first ``num`` rows as a :class:`list` of :class:`Row`.\n\n >>> df.take(2)\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n \"\"\"\n return self.limit(num).collect()\n\n @since(1.3)\n def foreach(self, f):\n \"\"\"Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.\n\n This is a shorthand for ``df.rdd.foreach()``.\n\n >>> def f(person):\n ... 
print(person.name)\n >>> df.foreach(f)\n \"\"\"\n self.rdd.foreach(f)\n\n @since(1.3)\n def foreachPartition(self, f):\n \"\"\"Applies the ``f`` function to each partition of this :class:`DataFrame`.\n\n This a shorthand for ``df.rdd.foreachPartition()``.\n\n >>> def f(people):\n ... for person in people:\n ... print(person.name)\n >>> df.foreachPartition(f)\n \"\"\"\n self.rdd.foreachPartition(f)\n\n @since(1.3)\n def cache(self):\n \"\"\"Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).\n\n .. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.\n \"\"\"\n self.is_cached = True\n self._jdf.cache()\n return self\n\n @since(1.3)\n def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):\n \"\"\"Sets the storage level to persist the contents of the :class:`DataFrame` across\n operations after the first time it is computed. This can only be used to assign\n a new storage level if the :class:`DataFrame` does not have a storage level set yet.\n If no storage level is specified defaults to (`MEMORY_AND_DISK`).\n\n .. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.\n \"\"\"\n self.is_cached = True\n javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)\n self._jdf.persist(javaStorageLevel)\n return self\n\n @property\n @since(2.1)\n def storageLevel(self):\n \"\"\"Get the :class:`DataFrame`'s current storage level.\n\n >>> df.storageLevel\n StorageLevel(False, False, False, False, 1)\n >>> df.cache().storageLevel\n StorageLevel(True, True, False, True, 1)\n >>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel\n StorageLevel(True, False, False, False, 2)\n \"\"\"\n java_storage_level = self._jdf.storageLevel()\n storage_level = StorageLevel(java_storage_level.useDisk(),\n java_storage_level.useMemory(),\n java_storage_level.useOffHeap(),\n java_storage_level.deserialized(),\n java_storage_level.replication())\n return storage_level\n\n @since(1.3)\n def unpersist(self, blocking=False):\n \"\"\"Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from\n memory and disk.\n\n .. note:: `blocking` default has changed to False to match Scala in 2.0.\n \"\"\"\n self.is_cached = False\n self._jdf.unpersist(blocking)\n return self\n\n @since(1.4)\n def coalesce(self, numPartitions):\n \"\"\"\n Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.\n\n :param numPartitions: int, to specify the target number of partitions\n\n Similar to coalesce defined on an :class:`RDD`, this operation results in a\n narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,\n there will not be a shuffle, instead each of the 100 new partitions will\n claim 10 of the current partitions. If a larger number of partitions is requested,\n it will stay at the current number of partitions.\n\n However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,\n this may result in your computation taking place on fewer nodes than\n you like (e.g. one node in the case of numPartitions = 1). To avoid this,\n you can call repartition(). 
This will add a shuffle step, but means the\n current upstream partitions will be executed in parallel (per whatever\n the current partitioning is).\n\n >>> df.coalesce(1).rdd.getNumPartitions()\n 1\n \"\"\"\n return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)\n\n @since(1.3)\n def repartition(self, numPartitions, *cols):\n \"\"\"\n Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The\n resulting DataFrame is hash partitioned.\n\n :param numPartitions:\n can be an int to specify the target number of partitions or a Column.\n If it is a Column, it will be used as the first partitioning column. If not specified,\n the default number of partitions is used.\n\n .. versionchanged:: 1.6\n Added optional arguments to specify the partitioning columns. Also made numPartitions\n optional if partitioning columns are specified.\n\n >>> df.repartition(10).rdd.getNumPartitions()\n 10\n >>> data = df.union(df).repartition(\"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 5| Bob|\n | 5| Bob|\n | 2|Alice|\n | 2|Alice|\n +---+-----+\n >>> data = data.repartition(7, \"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n >>> data.rdd.getNumPartitions()\n 7\n >>> data = data.repartition(\"name\", \"age\")\n >>> data.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 5| Bob|\n | 5| Bob|\n | 2|Alice|\n | 2|Alice|\n +---+-----+\n \"\"\"\n if isinstance(numPartitions, int):\n if len(cols) == 0:\n return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)\n else:\n return DataFrame(\n self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)\n elif isinstance(numPartitions, (basestring, Column)):\n cols = (numPartitions, ) + cols\n return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)\n else:\n raise TypeError(\"numPartitions should be an int or Column\")\n\n @since(\"2.4.0\")\n def repartitionByRange(self, numPartitions, *cols):\n \"\"\"\n Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The\n resulting DataFrame is range partitioned.\n\n :param numPartitions:\n can be an int to specify the target number of partitions or a Column.\n If it is a Column, it will be used as the first partitioning column. 
If not specified,\n the default number of partitions is used.\n\n At least one partition-by expression must be specified.\n When no explicit sort order is specified, \"ascending nulls first\" is assumed.\n\n Note that due to performance reasons this method uses sampling to estimate the ranges.\n Hence, the output may not be consistent, since sampling can return different values.\n The sample size can be controlled by the config\n `spark.sql.execution.rangeExchange.sampleSizePerPartition`.\n\n >>> df.repartitionByRange(2, \"age\").rdd.getNumPartitions()\n 2\n >>> df.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n >>> df.repartitionByRange(1, \"age\").rdd.getNumPartitions()\n 1\n >>> data = df.repartitionByRange(\"age\")\n >>> df.show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n \"\"\"\n if isinstance(numPartitions, int):\n if len(cols) == 0:\n return ValueError(\"At least one partition-by expression must be specified.\")\n else:\n return DataFrame(\n self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)\n elif isinstance(numPartitions, (basestring, Column)):\n cols = (numPartitions,) + cols\n return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)\n else:\n raise TypeError(\"numPartitions should be an int, string or Column\")\n\n @since(1.3)\n def distinct(self):\n \"\"\"Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.\n\n >>> df.distinct().count()\n 2\n \"\"\"\n return DataFrame(self._jdf.distinct(), self.sql_ctx)\n\n @since(1.3)\n def sample(self, withReplacement=None, fraction=None, seed=None):\n \"\"\"Returns a sampled subset of this :class:`DataFrame`.\n\n :param withReplacement: Sample with replacement or not (default False).\n :param fraction: Fraction of rows to generate, range [0.0, 1.0].\n :param seed: Seed for sampling (default a random seed).\n\n .. note:: This is not guaranteed to provide exactly the fraction specified of the total\n count of the given :class:`DataFrame`.\n\n .. 
note:: `fraction` is required and, `withReplacement` and `seed` are optional.\n\n >>> df = spark.range(10)\n >>> df.sample(0.5, 3).count()\n 7\n >>> df.sample(fraction=0.5, seed=3).count()\n 7\n >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()\n 1\n >>> df.sample(1.0).count()\n 10\n >>> df.sample(fraction=1.0).count()\n 10\n >>> df.sample(False, fraction=1.0).count()\n 10\n \"\"\"\n\n # For the cases below:\n # sample(True, 0.5 [, seed])\n # sample(True, fraction=0.5 [, seed])\n # sample(withReplacement=False, fraction=0.5 [, seed])\n is_withReplacement_set = \\\n type(withReplacement) == bool and isinstance(fraction, float)\n\n # For the case below:\n # sample(faction=0.5 [, seed])\n is_withReplacement_omitted_kwargs = \\\n withReplacement is None and isinstance(fraction, float)\n\n # For the case below:\n # sample(0.5 [, seed])\n is_withReplacement_omitted_args = isinstance(withReplacement, float)\n\n if not (is_withReplacement_set\n or is_withReplacement_omitted_kwargs\n or is_withReplacement_omitted_args):\n argtypes = [\n str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]\n raise TypeError(\n \"withReplacement (optional), fraction (required) and seed (optional)\"\n \" should be a bool, float and number; however, \"\n \"got [%s].\" % \", \".join(argtypes))\n\n if is_withReplacement_omitted_args:\n if fraction is not None:\n seed = fraction\n fraction = withReplacement\n withReplacement = None\n\n seed = long(seed) if seed is not None else None\n args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]\n jdf = self._jdf.sample(*args)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.5)\n def sampleBy(self, col, fractions, seed=None):\n \"\"\"\n Returns a stratified sample without replacement based on the\n fraction given on each stratum.\n\n :param col: column that defines strata\n :param fractions:\n sampling fraction for each stratum. If a stratum is not\n specified, we treat its fraction as zero.\n :param seed: random seed\n :return: a new DataFrame that represents the stratified sample\n\n >>> from pyspark.sql.functions import col\n >>> dataset = sqlContext.range(0, 100).select((col(\"id\") % 3).alias(\"key\"))\n >>> sampled = dataset.sampleBy(\"key\", fractions={0: 0.1, 1: 0.2}, seed=0)\n >>> sampled.groupBy(\"key\").count().orderBy(\"key\").show()\n +---+-----+\n |key|count|\n +---+-----+\n | 0| 3|\n | 1| 6|\n +---+-----+\n >>> dataset.sampleBy(col(\"key\"), fractions={2: 1.0}, seed=0).count()\n 33\n\n .. versionchanged:: 3.0\n Added sampling by a column of :class:`Column`\n \"\"\"\n if isinstance(col, basestring):\n col = Column(col)\n elif not isinstance(col, Column):\n raise ValueError(\"col must be a string or a column, but got %r\" % type(col))\n if not isinstance(fractions, dict):\n raise ValueError(\"fractions must be a dict but got %r\" % type(fractions))\n for k, v in fractions.items():\n if not isinstance(k, (float, int, long, basestring)):\n raise ValueError(\"key must be float, int, long, or string, but got %r\" % type(k))\n fractions[k] = float(v)\n col = col._jc\n seed = seed if seed is not None else random.randint(0, sys.maxsize)\n return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)\n\n @since(1.4)\n def randomSplit(self, weights, seed=None):\n \"\"\"Randomly splits this :class:`DataFrame` with the provided weights.\n\n :param weights: list of doubles as weights with which to split the DataFrame. 
Weights will\n be normalized if they don't sum up to 1.0.\n :param seed: The seed for sampling.\n\n >>> splits = df4.randomSplit([1.0, 2.0], 24)\n >>> splits[0].count()\n 2\n\n >>> splits[1].count()\n 2\n \"\"\"\n for w in weights:\n if w < 0.0:\n raise ValueError(\"Weights must be positive. Found weight value: %s\" % w)\n seed = seed if seed is not None else random.randint(0, sys.maxsize)\n rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))\n return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]\n\n @property\n @since(1.3)\n def dtypes(self):\n \"\"\"Returns all column names and their data types as a list.\n\n >>> df.dtypes\n [('age', 'int'), ('name', 'string')]\n \"\"\"\n return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]\n\n @property\n @since(1.3)\n def columns(self):\n \"\"\"Returns all column names as a list.\n\n >>> df.columns\n ['age', 'name']\n \"\"\"\n return [f.name for f in self.schema.fields]\n\n @since(2.3)\n def colRegex(self, colName):\n \"\"\"\n Selects column based on the column name specified as a regex and returns it\n as :class:`Column`.\n\n :param colName: string, column name specified as a regex.\n\n >>> df = spark.createDataFrame([(\"a\", 1), (\"b\", 2), (\"c\", 3)], [\"Col1\", \"Col2\"])\n >>> df.select(df.colRegex(\"`(Col1)?+.+`\")).show()\n +----+\n |Col2|\n +----+\n | 1|\n | 2|\n | 3|\n +----+\n \"\"\"\n if not isinstance(colName, basestring):\n raise ValueError(\"colName should be provided as string\")\n jc = self._jdf.colRegex(colName)\n return Column(jc)\n\n @ignore_unicode_prefix\n @since(1.3)\n def alias(self, alias):\n \"\"\"Returns a new :class:`DataFrame` with an alias set.\n\n :param alias: string, an alias name to be set for the DataFrame.\n\n >>> from pyspark.sql.functions import *\n >>> df_as1 = df.alias(\"df_as1\")\n >>> df_as2 = df.alias(\"df_as2\")\n >>> joined_df = df_as1.join(df_as2, col(\"df_as1.name\") == col(\"df_as2.name\"), 'inner')\n >>> joined_df.select(\"df_as1.name\", \"df_as2.name\", \"df_as2.age\").collect()\n [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]\n \"\"\"\n assert isinstance(alias, basestring), \"alias should be a string\"\n return DataFrame(getattr(self._jdf, \"as\")(alias), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(2.1)\n def crossJoin(self, other):\n \"\"\"Returns the cartesian product with another :class:`DataFrame`.\n\n :param other: Right side of the cartesian product.\n\n >>> df.select(\"age\", \"name\").collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df2.select(\"name\", \"height\").collect()\n [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]\n >>> df.crossJoin(df2.select(\"height\")).select(\"age\", \"name\", \"height\").collect()\n [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),\n Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]\n \"\"\"\n\n jdf = self._jdf.crossJoin(other._jdf)\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def join(self, other, on=None, how=None):\n \"\"\"Joins with another :class:`DataFrame`, using the given join expression.\n\n :param other: Right side of the join\n :param on: a string for the join column name, a list of column names,\n a join expression (Column), or a list of Columns.\n If `on` is a string or a list of strings indicating the name of the join column(s),\n the column(s) must exist on both sides, and this performs an equi-join.\n :param how: str, default 
``inner``. Must be one of: ``inner``, ``cross``, ``outer``,\n ``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,\n ``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,\n ``anti``, ``leftanti`` and ``left_anti``.\n\n The following performs a full outer join between ``df1`` and ``df2``.\n\n >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()\n [Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]\n\n >>> df.join(df2, 'name', 'outer').select('name', 'height').collect()\n [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]\n\n >>> cond = [df.name == df3.name, df.age == df3.age]\n >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n\n >>> df.join(df2, 'name').select(df.name, df2.height).collect()\n [Row(name=u'Bob', height=85)]\n\n >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()\n [Row(name=u'Bob', age=5)]\n \"\"\"\n\n if on is not None and not isinstance(on, list):\n on = [on]\n\n if on is not None:\n if isinstance(on[0], basestring):\n on = self._jseq(on)\n else:\n assert isinstance(on[0], Column), \"on should be Column or list of Column\"\n on = reduce(lambda x, y: x.__and__(y), on)\n on = on._jc\n\n if on is None and how is None:\n jdf = self._jdf.join(other._jdf)\n else:\n if how is None:\n how = \"inner\"\n if on is None:\n on = self._jseq([])\n assert isinstance(how, basestring), \"how should be basestring\"\n jdf = self._jdf.join(other._jdf, on, how)\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.6)\n def sortWithinPartitions(self, *cols, **kwargs):\n \"\"\"Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).\n\n :param cols: list of :class:`Column` or column names to sort by.\n :param ascending: boolean or list of boolean (default True).\n Sort ascending vs. descending. Specify list for multiple sort orders.\n If a list is specified, length of the list must equal length of the `cols`.\n\n >>> df.sortWithinPartitions(\"age\", ascending=False).show()\n +---+-----+\n |age| name|\n +---+-----+\n | 2|Alice|\n | 5| Bob|\n +---+-----+\n \"\"\"\n jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def sort(self, *cols, **kwargs):\n \"\"\"Returns a new :class:`DataFrame` sorted by the specified column(s).\n\n :param cols: list of :class:`Column` or column names to sort by.\n :param ascending: boolean or list of boolean (default True).\n Sort ascending vs. descending. 
Specify list for multiple sort orders.\n If a list is specified, length of the list must equal length of the `cols`.\n\n >>> df.sort(df.age.desc()).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.sort(\"age\", ascending=False).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.orderBy(df.age.desc()).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> from pyspark.sql.functions import *\n >>> df.sort(asc(\"age\")).collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df.orderBy(desc(\"age\"), \"name\").collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n >>> df.orderBy([\"age\", \"name\"], ascending=[0, 1]).collect()\n [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]\n \"\"\"\n jdf = self._jdf.sort(self._sort_cols(cols, kwargs))\n return DataFrame(jdf, self.sql_ctx)\n\n orderBy = sort\n\n def _jseq(self, cols, converter=None):\n \"\"\"Return a JVM Seq of Columns from a list of Column or names\"\"\"\n return _to_seq(self.sql_ctx._sc, cols, converter)\n\n def _jmap(self, jm):\n \"\"\"Return a JVM Scala Map from a dict\"\"\"\n return _to_scala_map(self.sql_ctx._sc, jm)\n\n def _jcols(self, *cols):\n \"\"\"Return a JVM Seq of Columns from a list of Column or column names\n\n If `cols` has only one list in it, cols[0] will be used as the list.\n \"\"\"\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n return self._jseq(cols, _to_java_column)\n\n def _sort_cols(self, cols, kwargs):\n \"\"\" Return a JVM Seq of Columns that describes the sort order\n \"\"\"\n if not cols:\n raise ValueError(\"should sort by at least one column\")\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n jcols = [_to_java_column(c) for c in cols]\n ascending = kwargs.get('ascending', True)\n if isinstance(ascending, (bool, int)):\n if not ascending:\n jcols = [jc.desc() for jc in jcols]\n elif isinstance(ascending, list):\n jcols = [jc if asc else jc.desc()\n for asc, jc in zip(ascending, jcols)]\n else:\n raise TypeError(\"ascending can only be boolean or list, but got %s\" % type(ascending))\n return self._jseq(jcols)\n\n @since(\"1.3.1\")\n def describe(self, *cols):\n \"\"\"Computes basic statistics for numeric and string columns.\n\n This include count, mean, stddev, min, and max. If no columns are\n given, this function computes statistics for all numerical or string columns.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n >>> df.describe(['age']).show()\n +-------+------------------+\n |summary| age|\n +-------+------------------+\n | count| 2|\n | mean| 3.5|\n | stddev|2.1213203435596424|\n | min| 2|\n | max| 5|\n +-------+------------------+\n >>> df.describe().show()\n +-------+------------------+-----+\n |summary| age| name|\n +-------+------------------+-----+\n | count| 2| 2|\n | mean| 3.5| null|\n | stddev|2.1213203435596424| null|\n | min| 2|Alice|\n | max| 5| Bob|\n +-------+------------------+-----+\n\n Use summary for expanded statistics and control over which statistics to compute.\n \"\"\"\n if len(cols) == 1 and isinstance(cols[0], list):\n cols = cols[0]\n jdf = self._jdf.describe(self._jseq(cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(\"2.3.0\")\n def summary(self, *statistics):\n \"\"\"Computes specified statistics for numeric and string columns. 
Available statistics are:\n - count\n - mean\n - stddev\n - min\n - max\n - arbitrary approximate percentiles specified as a percentage (eg, 75%)\n\n If no statistics are given, this function computes count, mean, stddev, min,\n approximate quartiles (percentiles at 25%, 50%, and 75%), and max.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n >>> df.summary().show()\n +-------+------------------+-----+\n |summary| age| name|\n +-------+------------------+-----+\n | count| 2| 2|\n | mean| 3.5| null|\n | stddev|2.1213203435596424| null|\n | min| 2|Alice|\n | 25%| 2| null|\n | 50%| 2| null|\n | 75%| 5| null|\n | max| 5| Bob|\n +-------+------------------+-----+\n\n >>> df.summary(\"count\", \"min\", \"25%\", \"75%\", \"max\").show()\n +-------+---+-----+\n |summary|age| name|\n +-------+---+-----+\n | count| 2| 2|\n | min| 2|Alice|\n | 25%| 2| null|\n | 75%| 5| null|\n | max| 5| Bob|\n +-------+---+-----+\n\n To do a summary for specific columns first select them:\n\n >>> df.select(\"age\", \"name\").summary(\"count\").show()\n +-------+---+----+\n |summary|age|name|\n +-------+---+----+\n | count| 2| 2|\n +-------+---+----+\n\n See also describe for basic statistics.\n \"\"\"\n if len(statistics) == 1 and isinstance(statistics[0], list):\n statistics = statistics[0]\n jdf = self._jdf.summary(self._jseq(statistics))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def head(self, n=None):\n \"\"\"Returns the first ``n`` rows.\n\n .. note:: This method should only be used if the resulting array is expected\n to be small, as all the data is loaded into the driver's memory.\n\n :param n: int, default 1. 
Number of rows to return.\n :return: If n is greater than 1, return a list of :class:`Row`.\n If n is 1, return a single Row.\n\n >>> df.head()\n Row(age=2, name=u'Alice')\n >>> df.head(1)\n [Row(age=2, name=u'Alice')]\n \"\"\"\n if n is None:\n rs = self.head(1)\n return rs[0] if rs else None\n return self.take(n)\n\n @ignore_unicode_prefix\n @since(1.3)\n def first(self):\n \"\"\"Returns the first row as a :class:`Row`.\n\n >>> df.first()\n Row(age=2, name=u'Alice')\n \"\"\"\n return self.head()\n\n @ignore_unicode_prefix\n @since(1.3)\n def __getitem__(self, item):\n \"\"\"Returns the column as a :class:`Column`.\n\n >>> df.select(df['age']).collect()\n [Row(age=2), Row(age=5)]\n >>> df[ [\"name\", \"age\"]].collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n >>> df[ df.age > 3 ].collect()\n [Row(age=5, name=u'Bob')]\n >>> df[df[0] > 3].collect()\n [Row(age=5, name=u'Bob')]\n \"\"\"\n if isinstance(item, basestring):\n jc = self._jdf.apply(item)\n return Column(jc)\n elif isinstance(item, Column):\n return self.filter(item)\n elif isinstance(item, (list, tuple)):\n return self.select(*item)\n elif isinstance(item, int):\n jc = self._jdf.apply(self.columns[item])\n return Column(jc)\n else:\n raise TypeError(\"unexpected item type: %s\" % type(item))\n\n @since(1.3)\n def __getattr__(self, name):\n \"\"\"Returns the :class:`Column` denoted by ``name``.\n\n >>> df.select(df.age).collect()\n [Row(age=2), Row(age=5)]\n \"\"\"\n if name not in self.columns:\n raise AttributeError(\n \"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n jc = self._jdf.apply(name)\n return Column(jc)\n\n @ignore_unicode_prefix\n @since(1.3)\n def select(self, *cols):\n \"\"\"Projects a set of expressions and returns a new :class:`DataFrame`.\n\n :param cols: list of column names (string) or expressions (:class:`Column`).\n If one of the column names is '*', that column is expanded to include all columns\n in the current DataFrame.\n\n >>> df.select('*').collect()\n [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]\n >>> df.select('name', 'age').collect()\n [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]\n >>> df.select(df.name, (df.age + 10).alias('age')).collect()\n [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]\n \"\"\"\n jdf = self._jdf.select(self._jcols(*cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(1.3)\n def selectExpr(self, *expr):\n \"\"\"Projects a set of SQL expressions and returns a new :class:`DataFrame`.\n\n This is a variant of :func:`select` that accepts SQL expressions.\n\n >>> df.selectExpr(\"age * 2\", \"abs(age)\").collect()\n [Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]\n \"\"\"\n if len(expr) == 1 and isinstance(expr[0], list):\n expr = expr[0]\n jdf = self._jdf.selectExpr(self._jseq(expr))\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def filter(self, condition):\n \"\"\"Filters rows using the given condition.\n\n :func:`where` is an alias for :func:`filter`.\n\n :param condition: a :class:`Column` of :class:`types.BooleanType`\n or a string of SQL expression.\n\n >>> df.filter(df.age > 3).collect()\n [Row(age=5, name=u'Bob')]\n >>> df.where(df.age == 2).collect()\n [Row(age=2, name=u'Alice')]\n\n >>> df.filter(\"age > 3\").collect()\n [Row(age=5, name=u'Bob')]\n >>> df.where(\"age = 2\").collect()\n [Row(age=2, name=u'Alice')]\n \"\"\"\n if isinstance(condition, basestring):\n jdf = self._jdf.filter(condition)\n elif isinstance(condition, Column):\n jdf = 
self._jdf.filter(condition._jc)\n else:\n raise TypeError(\"condition should be string or Column\")\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def groupBy(self, *cols):\n \"\"\"Groups the :class:`DataFrame` using the specified columns,\n so we can run aggregation on them. See :class:`GroupedData`\n for all the available aggregate functions.\n\n :func:`groupby` is an alias for :func:`groupBy`.\n\n :param cols: list of columns to group by.\n Each element should be a column name (string) or an expression (:class:`Column`).\n\n >>> df.groupBy().avg().collect()\n [Row(avg(age)=3.5)]\n >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())\n [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]\n >>> sorted(df.groupBy(df.name).avg().collect())\n [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]\n >>> sorted(df.groupBy(['name', df.age]).count().collect())\n [Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]\n \"\"\"\n jgd = self._jdf.groupBy(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self)\n\n @since(1.4)\n def rollup(self, *cols):\n \"\"\"\n Create a multi-dimensional rollup for the current :class:`DataFrame` using\n the specified columns, so we can run aggregation on them.\n\n >>> df.rollup(\"name\", df.age).count().orderBy(\"name\", \"age\").show()\n +-----+----+-----+\n | name| age|count|\n +-----+----+-----+\n | null|null| 2|\n |Alice|null| 1|\n |Alice| 2| 1|\n | Bob|null| 1|\n | Bob| 5| 1|\n +-----+----+-----+\n \"\"\"\n jgd = self._jdf.rollup(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self)\n\n @since(1.4)\n def cube(self, *cols):\n \"\"\"\n Create a multi-dimensional cube for the current :class:`DataFrame` using\n the specified columns, so we can run aggregation on them.\n\n >>> df.cube(\"name\", df.age).count().orderBy(\"name\", \"age\").show()\n +-----+----+-----+\n | name| age|count|\n +-----+----+-----+\n | null|null| 2|\n | null| 2| 1|\n | null| 5| 1|\n |Alice|null| 1|\n |Alice| 2| 1|\n | Bob|null| 1|\n | Bob| 5| 1|\n +-----+----+-----+\n \"\"\"\n jgd = self._jdf.cube(self._jcols(*cols))\n from pyspark.sql.group import GroupedData\n return GroupedData(jgd, self)\n\n @since(1.3)\n def agg(self, *exprs):\n \"\"\" Aggregate on the entire :class:`DataFrame` without groups\n (shorthand for ``df.groupBy.agg()``).\n\n >>> df.agg({\"age\": \"max\"}).collect()\n [Row(max(age)=5)]\n >>> from pyspark.sql import functions as F\n >>> df.agg(F.min(df.age)).collect()\n [Row(min(age)=2)]\n \"\"\"\n return self.groupBy().agg(*exprs)\n\n @since(2.0)\n def union(self, other):\n \"\"\" Return a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union\n (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n \"\"\"\n return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def unionAll(self, other):\n \"\"\" Return a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is equivalent to `UNION ALL` in SQL. 
To do a SQL-style set union\n (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n \"\"\"\n return self.union(other)\n\n @since(2.3)\n def unionByName(self, other):\n \"\"\" Returns a new :class:`DataFrame` containing union of rows in this and another frame.\n\n This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set\n union (that does deduplication of elements), use this function followed by :func:`distinct`.\n\n The difference between this function and :func:`union` is that this function\n resolves columns by name (not by position):\n\n >>> df1 = spark.createDataFrame([[1, 2, 3]], [\"col0\", \"col1\", \"col2\"])\n >>> df2 = spark.createDataFrame([[4, 5, 6]], [\"col1\", \"col2\", \"col0\"])\n >>> df1.unionByName(df2).show()\n +----+----+----+\n |col0|col1|col2|\n +----+----+----+\n | 1| 2| 3|\n | 6| 4| 5|\n +----+----+----+\n \"\"\"\n return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def intersect(self, other):\n \"\"\" Return a new :class:`DataFrame` containing rows only in\n both this frame and another frame.\n\n This is equivalent to `INTERSECT` in SQL.\n \"\"\"\n return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)\n\n @since(2.4)\n def intersectAll(self, other):\n \"\"\" Return a new :class:`DataFrame` containing rows in both this dataframe and other\n dataframe while preserving duplicates.\n\n This is equivalent to `INTERSECT ALL` in SQL.\n >>> df1 = spark.createDataFrame([(\"a\", 1), (\"a\", 1), (\"b\", 3), (\"c\", 4)], [\"C1\", \"C2\"])\n >>> df2 = spark.createDataFrame([(\"a\", 1), (\"a\", 1), (\"b\", 3)], [\"C1\", \"C2\"])\n\n >>> df1.intersectAll(df2).sort(\"C1\", \"C2\").show()\n +---+---+\n | C1| C2|\n +---+---+\n | a| 1|\n | a| 1|\n | b| 3|\n +---+---+\n\n Also as standard in SQL, this function resolves columns by position (not by name).\n \"\"\"\n return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)\n\n @since(1.3)\n def subtract(self, other):\n \"\"\" Return a new :class:`DataFrame` containing rows in this frame\n but not in another frame.\n\n This is equivalent to `EXCEPT DISTINCT` in SQL.\n\n \"\"\"\n return DataFrame(getattr(self._jdf, \"except\")(other._jdf), self.sql_ctx)\n\n @since(1.4)\n def dropDuplicates(self, subset=None):\n \"\"\"Return a new :class:`DataFrame` with duplicate rows removed,\n optionally only considering certain columns.\n\n For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming\n :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop\n duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can\n be and system will accordingly limit the state. In addition, too late data older than\n watermark will be dropped to avoid any possibility of duplicates.\n\n :func:`drop_duplicates` is an alias for :func:`dropDuplicates`.\n\n >>> from pyspark.sql import Row\n >>> df = sc.parallelize([ \\\\\n ... Row(name='Alice', age=5, height=80), \\\\\n ... Row(name='Alice', age=5, height=80), \\\\\n ... 
Row(name='Alice', age=10, height=80)]).toDF()\n >>> df.dropDuplicates().show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 5| 80|Alice|\n | 10| 80|Alice|\n +---+------+-----+\n\n >>> df.dropDuplicates(['name', 'height']).show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 5| 80|Alice|\n +---+------+-----+\n \"\"\"\n if subset is None:\n jdf = self._jdf.dropDuplicates()\n else:\n jdf = self._jdf.dropDuplicates(self._jseq(subset))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(\"1.3.1\")\n def dropna(self, how='any', thresh=None, subset=None):\n \"\"\"Returns a new :class:`DataFrame` omitting rows with null values.\n :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.\n\n :param how: 'any' or 'all'.\n If 'any', drop a row if it contains any nulls.\n If 'all', drop a row only if all its values are null.\n :param thresh: int, default None\n If specified, drop rows that have less than `thresh` non-null values.\n This overwrites the `how` parameter.\n :param subset: optional list of column names to consider.\n\n >>> df4.na.drop().show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 10| 80|Alice|\n +---+------+-----+\n \"\"\"\n if how is not None and how not in ['any', 'all']:\n raise ValueError(\"how ('\" + how + \"') should be 'any' or 'all'\")\n\n if subset is None:\n subset = self.columns\n elif isinstance(subset, basestring):\n subset = [subset]\n elif not isinstance(subset, (list, tuple)):\n raise ValueError(\"subset should be a list or tuple of column names\")\n\n if thresh is None:\n thresh = len(subset) if how == 'any' else 1\n\n return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)\n\n @since(\"1.3.1\")\n def fillna(self, value, subset=None):\n \"\"\"Replace null values, alias for ``na.fill()``.\n :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.\n\n :param value: int, long, float, string, bool or dict.\n Value to replace null values with.\n If the value is a dict, then `subset` is ignored and `value` must be a mapping\n from column name (string) to replacement value. 
The replacement value must be\n an int, long, float, boolean, or string.\n :param subset: optional list of column names to consider.\n Columns specified in subset that do not have matching data type are ignored.\n For example, if `value` is a string, and subset contains a non-string column,\n then the non-string column is simply ignored.\n\n >>> df4.na.fill(50).show()\n +---+------+-----+\n |age|height| name|\n +---+------+-----+\n | 10| 80|Alice|\n | 5| 50| Bob|\n | 50| 50| Tom|\n | 50| 50| null|\n +---+------+-----+\n\n >>> df5.na.fill(False).show()\n +----+-------+-----+\n | age| name| spy|\n +----+-------+-----+\n | 10| Alice|false|\n | 5| Bob|false|\n |null|Mallory| true|\n +----+-------+-----+\n\n >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()\n +---+------+-------+\n |age|height| name|\n +---+------+-------+\n | 10| 80| Alice|\n | 5| null| Bob|\n | 50| null| Tom|\n | 50| null|unknown|\n +---+------+-------+\n \"\"\"\n if not isinstance(value, (float, int, long, basestring, bool, dict)):\n raise ValueError(\"value should be a float, int, long, string, bool or dict\")\n\n # Note that bool validates isinstance(int), but we don't want to\n # convert bools to floats\n\n if not isinstance(value, bool) and isinstance(value, (int, long)):\n value = float(value)\n\n if isinstance(value, dict):\n return DataFrame(self._jdf.na().fill(value), self.sql_ctx)\n elif subset is None:\n return DataFrame(self._jdf.na().fill(value), self.sql_ctx)\n else:\n if isinstance(subset, basestring):\n subset = [subset]\n elif not isinstance(subset, (list, tuple)):\n raise ValueError(\"subset should be a list or tuple of column names\")\n\n return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)\n\n @since(1.4)\n def replace(self, to_replace, value=_NoValue, subset=None):\n \"\"\"Returns a new :class:`DataFrame` replacing a value with another value.\n :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are\n aliases of each other.\n Values to_replace and value must have the same type and can only be numerics, booleans,\n or strings. Value can have None. When replacing, the new value will be cast\n to the type of the existing column.\n For numeric replacements all values to be replaced should have unique\n floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)\n and arbitrary replacement will be used.\n\n :param to_replace: bool, int, long, float, string, list or dict.\n Value to be replaced.\n If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`\n must be a mapping between a value and a replacement.\n :param value: bool, int, long, float, string, list or None.\n The replacement value must be a bool, int, long, float, string or None. 
If `value` is a\n list, `value` should be of the same length and type as `to_replace`.\n If `value` is a scalar and `to_replace` is a sequence, then `value` is\n used as a replacement for each item in `to_replace`.\n :param subset: optional list of column names to consider.\n Columns specified in subset that do not have matching data type are ignored.\n For example, if `value` is a string, and subset contains a non-string column,\n then the non-string column is simply ignored.\n\n >>> df4.na.replace(10, 20).show()\n +----+------+-----+\n | age|height| name|\n +----+------+-----+\n | 20| 80|Alice|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null| null|\n +----+------+-----+\n\n >>> df4.na.replace('Alice', None).show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80|null|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n\n >>> df4.na.replace({'Alice': None}).show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80|null|\n | 5| null| Bob|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n\n >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()\n +----+------+----+\n | age|height|name|\n +----+------+----+\n | 10| 80| A|\n | 5| null| B|\n |null| null| Tom|\n |null| null|null|\n +----+------+----+\n \"\"\"\n if value is _NoValue:\n if isinstance(to_replace, dict):\n value = None\n else:\n raise TypeError(\"value argument is required when to_replace is not a dictionary.\")\n\n # Helper functions\n def all_of(types):\n \"\"\"Given a type or tuple of types and a sequence of xs\n check if each x is instance of type(s)\n\n >>> all_of(bool)([True, False])\n True\n >>> all_of(basestring)([\"a\", 1])\n False\n \"\"\"\n def all_of_(xs):\n return all(isinstance(x, types) for x in xs)\n return all_of_\n\n all_of_bool = all_of(bool)\n all_of_str = all_of(basestring)\n all_of_numeric = all_of((float, int, long))\n\n # Validate input types\n valid_types = (bool, float, int, long, basestring, list, tuple)\n if not isinstance(to_replace, valid_types + (dict, )):\n raise ValueError(\n \"to_replace should be a bool, float, int, long, string, list, tuple, or dict. \"\n \"Got {0}\".format(type(to_replace)))\n\n if not isinstance(value, valid_types) and value is not None \\\n and not isinstance(to_replace, dict):\n raise ValueError(\"If to_replace is not a dict, value should be \"\n \"a bool, float, int, long, string, list, tuple or None. \"\n \"Got {0}\".format(type(value)))\n\n if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):\n if len(to_replace) != len(value):\n raise ValueError(\"to_replace and value lists should be of the same length. \"\n \"Got {0} and {1}\".format(len(to_replace), len(value)))\n\n if not (subset is None or isinstance(subset, (list, tuple, basestring))):\n raise ValueError(\"subset should be a list or tuple of column names, \"\n \"column name or None. Got {0}\".format(type(subset)))\n\n # Reshape input arguments if necessary\n if isinstance(to_replace, (float, int, long, basestring)):\n to_replace = [to_replace]\n\n if isinstance(to_replace, dict):\n rep_dict = to_replace\n if value is not None:\n warnings.warn(\"to_replace is a dict and value is not None. 
value will be ignored.\")\n else:\n if isinstance(value, (float, int, long, basestring)) or value is None:\n value = [value for _ in range(len(to_replace))]\n rep_dict = dict(zip(to_replace, value))\n\n if isinstance(subset, basestring):\n subset = [subset]\n\n # Verify we were not passed in mixed type generics.\n if not any(all_of_type(rep_dict.keys())\n and all_of_type(x for x in rep_dict.values() if x is not None)\n for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):\n raise ValueError(\"Mixed type replacements are not supported\")\n\n if subset is None:\n return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)\n else:\n return DataFrame(\n self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)\n\n @since(2.0)\n def approxQuantile(self, col, probabilities, relativeError):\n \"\"\"\n Calculates the approximate quantiles of numerical columns of a\n DataFrame.\n\n The result of this algorithm has the following deterministic bound:\n If the DataFrame has N elements and if we request the quantile at\n probability `p` up to error `err`, then the algorithm will return\n a sample `x` from the DataFrame so that the *exact* rank of `x` is\n close to (p * N). More precisely,\n\n floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).\n\n This method implements a variation of the Greenwald-Khanna\n algorithm (with some speed optimizations). The algorithm was first\n present in [[https://doi.org/10.1145/375663.375670\n Space-efficient Online Computation of Quantile Summaries]]\n by Greenwald and Khanna.\n\n Note that null values will be ignored in numerical columns before calculation.\n For columns only containing null values, an empty list is returned.\n\n :param col: str, list.\n Can be a single column name, or a list of names for multiple columns.\n :param probabilities: a list of quantile probabilities\n Each number must belong to [0, 1].\n For example 0 is the minimum, 0.5 is the median, 1 is the maximum.\n :param relativeError: The relative target precision to achieve\n (>= 0). If set to zero, the exact quantiles are computed, which\n could be very expensive. Note that values greater than 1 are\n accepted but give the same result as 1.\n :return: the approximate quantiles at the given probabilities. If\n the input `col` is a string, the output is a list of floats. If the\n input `col` is a list or tuple of strings, the output is also a\n list, but each element in it is a list of floats, i.e., the output\n is a list of list of floats.\n\n .. 
versionchanged:: 2.2\n Added support for multiple columns.\n \"\"\"\n\n if not isinstance(col, (basestring, list, tuple)):\n raise ValueError(\"col should be a string, list or tuple, but got %r\" % type(col))\n\n isStr = isinstance(col, basestring)\n\n if isinstance(col, tuple):\n col = list(col)\n elif isStr:\n col = [col]\n\n for c in col:\n if not isinstance(c, basestring):\n raise ValueError(\"columns should be strings, but got %r\" % type(c))\n col = _to_list(self._sc, col)\n\n if not isinstance(probabilities, (list, tuple)):\n raise ValueError(\"probabilities should be a list or tuple\")\n if isinstance(probabilities, tuple):\n probabilities = list(probabilities)\n for p in probabilities:\n if not isinstance(p, (float, int, long)) or p < 0 or p > 1:\n raise ValueError(\"probabilities should be numerical (float, int, long) in [0,1].\")\n probabilities = _to_list(self._sc, probabilities)\n\n if not isinstance(relativeError, (float, int, long)) or relativeError < 0:\n raise ValueError(\"relativeError should be numerical (float, int, long) >= 0.\")\n relativeError = float(relativeError)\n\n jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)\n jaq_list = [list(j) for j in jaq]\n return jaq_list[0] if isStr else jaq_list\n\n @since(1.4)\n def corr(self, col1, col2, method=None):\n \"\"\"\n Calculates the correlation of two columns of a DataFrame as a double value.\n Currently only supports the Pearson Correlation Coefficient.\n :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.\n\n :param col1: The name of the first column\n :param col2: The name of the second column\n :param method: The correlation method. Currently only supports \"pearson\"\n \"\"\"\n if not isinstance(col1, basestring):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, basestring):\n raise ValueError(\"col2 should be a string.\")\n if not method:\n method = \"pearson\"\n if not method == \"pearson\":\n raise ValueError(\"Currently only the calculation of the Pearson Correlation \" +\n \"coefficient is supported.\")\n return self._jdf.stat().corr(col1, col2, method)\n\n @since(1.4)\n def cov(self, col1, col2):\n \"\"\"\n Calculate the sample covariance for the given columns, specified by their names, as a\n double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.\n\n :param col1: The name of the first column\n :param col2: The name of the second column\n \"\"\"\n if not isinstance(col1, basestring):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, basestring):\n raise ValueError(\"col2 should be a string.\")\n return self._jdf.stat().cov(col1, col2)\n\n @since(1.4)\n def crosstab(self, col1, col2):\n \"\"\"\n Computes a pair-wise frequency table of the given columns. Also known as a contingency\n table. The number of distinct values for each column should be less than 1e4. At most 1e6\n non-zero pair frequencies will be returned.\n The first column of each row will be the distinct values of `col1` and the column names\n will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.\n Pairs that have no occurrences will have zero as their counts.\n :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.\n\n :param col1: The name of the first column. Distinct items will make the first item of\n each row.\n :param col2: The name of the second column. 
Distinct items will make the column names\n of the DataFrame.\n \"\"\"\n if not isinstance(col1, basestring):\n raise ValueError(\"col1 should be a string.\")\n if not isinstance(col2, basestring):\n raise ValueError(\"col2 should be a string.\")\n return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)\n\n @since(1.4)\n def freqItems(self, cols, support=None):\n \"\"\"\n Finding frequent items for columns, possibly with false positives. Using the\n frequent element count algorithm described in\n \"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou\".\n :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.\n\n .. note:: This function is meant for exploratory data analysis, as we make no\n guarantee about the backward compatibility of the schema of the resulting DataFrame.\n\n :param cols: Names of the columns to calculate frequent items for as a list or tuple of\n strings.\n :param support: The frequency with which to consider an item 'frequent'. Default is 1%.\n The support must be greater than 1e-4.\n \"\"\"\n if isinstance(cols, tuple):\n cols = list(cols)\n if not isinstance(cols, list):\n raise ValueError(\"cols must be a list or tuple of column names as strings.\")\n if not support:\n support = 0.01\n return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def withColumn(self, colName, col):\n \"\"\"\n Returns a new :class:`DataFrame` by adding a column or replacing the\n existing column that has the same name.\n\n The column expression must be an expression over this DataFrame; attempting to add\n a column from some other dataframe will raise an error.\n\n :param colName: string, name of the new column.\n :param col: a :class:`Column` expression for the new column.\n\n .. note:: This method introduces a projection internally. 
Therefore, calling it multiple\n times, for instance, via loops in order to add multiple columns can generate big\n plans which can cause performance issues and even `StackOverflowException`.\n To avoid this, use :func:`select` with the multiple columns at once.\n\n >>> df.withColumn('age2', df.age + 2).collect()\n [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]\n\n \"\"\"\n assert isinstance(col, Column), \"col should be Column\"\n return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)\n\n @ignore_unicode_prefix\n @since(1.3)\n def withColumnRenamed(self, existing, new):\n \"\"\"Returns a new :class:`DataFrame` by renaming an existing column.\n This is a no-op if schema doesn't contain the given column name.\n\n :param existing: string, name of the existing column to rename.\n :param new: string, new name of the column.\n\n >>> df.withColumnRenamed('age', 'age2').collect()\n [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]\n \"\"\"\n return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)\n\n @since(1.4)\n @ignore_unicode_prefix\n def drop(self, *cols):\n \"\"\"Returns a new :class:`DataFrame` that drops the specified column.\n This is a no-op if schema doesn't contain the given column name(s).\n\n :param cols: a string name of the column to drop, or a\n :class:`Column` to drop, or a list of string name of the columns to drop.\n\n >>> df.drop('age').collect()\n [Row(name=u'Alice'), Row(name=u'Bob')]\n\n >>> df.drop(df.age).collect()\n [Row(name=u'Alice'), Row(name=u'Bob')]\n\n >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()\n [Row(age=5, height=85, name=u'Bob')]\n\n >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()\n [Row(age=5, name=u'Bob', height=85)]\n\n >>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()\n [Row(name=u'Bob')]\n \"\"\"\n if len(cols) == 1:\n col = cols[0]\n if isinstance(col, basestring):\n jdf = self._jdf.drop(col)\n elif isinstance(col, Column):\n jdf = self._jdf.drop(col._jc)\n else:\n raise TypeError(\"col should be a string or a Column\")\n else:\n for col in cols:\n if not isinstance(col, basestring):\n raise TypeError(\"each col in the param list should be a string\")\n jdf = self._jdf.drop(self._jseq(cols))\n\n return DataFrame(jdf, self.sql_ctx)\n\n @ignore_unicode_prefix\n def toDF(self, *cols):\n \"\"\"Returns a new class:`DataFrame` that with new specified column names\n\n :param cols: list of new column names (string)\n\n >>> df.toDF('f1', 'f2').collect()\n [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]\n \"\"\"\n jdf = self._jdf.toDF(self._jseq(cols))\n return DataFrame(jdf, self.sql_ctx)\n\n @since(3.0)\n def transform(self, func):\n \"\"\"Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.\n\n :param func: a function that takes and returns a class:`DataFrame`.\n\n >>> from pyspark.sql.functions import col\n >>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], [\"int\", \"float\"])\n >>> def cast_all_to_int(input_df):\n ... return input_df.select([col(col_name).cast(\"int\") for col_name in input_df.columns])\n >>> def sort_columns_asc(input_df):\n ... 
return input_df.select(*sorted(input_df.columns))\n >>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()\n +-----+---+\n |float|int|\n +-----+---+\n | 1| 1|\n | 2| 2|\n +-----+---+\n \"\"\"\n result = func(self)\n assert isinstance(result, DataFrame), \"Func returned an instance of type [%s], \" \\\n \"should have been DataFrame.\" % type(result)\n return result\n\n @since(1.3)\n def toPandas(self):\n \"\"\"\n Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.\n\n This is only available if Pandas is installed and available.\n\n .. note:: This method should only be used if the resulting Pandas's DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n .. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.\n\n >>> df.toPandas() # doctest: +SKIP\n age name\n 0 2 Alice\n 1 5 Bob\n \"\"\"\n from pyspark.sql.utils import require_minimum_pandas_version\n require_minimum_pandas_version()\n\n import pandas as pd\n\n if self.sql_ctx._conf.pandasRespectSessionTimeZone():\n timezone = self.sql_ctx._conf.sessionLocalTimeZone()\n else:\n timezone = None\n\n if self.sql_ctx._conf.arrowPySparkEnabled():\n use_arrow = True\n try:\n from pyspark.sql.types import to_arrow_schema\n from pyspark.sql.utils import require_minimum_pyarrow_version\n\n require_minimum_pyarrow_version()\n to_arrow_schema(self.schema)\n except Exception as e:\n\n if self.sql_ctx._conf.arrowPySparkFallbackEnabled():\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, \"\n \"failed by the reason below:\\n %s\\n\"\n \"Attempting non-optimization as \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to \"\n \"true.\" % _exception_message(e))\n warnings.warn(msg)\n use_arrow = False\n else:\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and will not continue because automatic fallback \"\n \"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to \"\n \"false.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Try to use Arrow optimization when the schema is supported and the required version\n # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.\n if use_arrow:\n try:\n from pyspark.sql.types import _check_dataframe_localize_timestamps\n import pyarrow\n batches = self._collectAsArrow()\n if len(batches) > 0:\n table = pyarrow.Table.from_batches(batches)\n # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type\n # values, but we should use datetime.date to match the behavior with when\n # Arrow optimization is disabled.\n pdf = table.to_pandas(date_as_object=True)\n return _check_dataframe_localize_timestamps(pdf, timezone)\n else:\n return pd.DataFrame.from_records([], columns=self.columns)\n except Exception as e:\n # We might have to allow fallback here as well but multiple Spark jobs can\n # be executed. So, simply fail in this case for now.\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and can not continue. 
Note that \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an \"\n \"effect on failures in the middle of \"\n \"computation.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Below is toPandas without Arrow optimization.\n pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)\n\n dtype = {}\n for field in self.schema:\n pandas_type = _to_corrected_pandas_type(field.dataType)\n # SPARK-21766: if an integer field is nullable and has null values, it can be\n # inferred by pandas as float column. Once we convert the column with NaN back\n # to integer type e.g., np.int16, we will hit exception. So we use the inferred\n # float type, not the corrected type from the schema in this case.\n if pandas_type is not None and \\\n not(isinstance(field.dataType, IntegralType) and field.nullable and\n pdf[field.name].isnull().any()):\n dtype[field.name] = pandas_type\n\n for f, t in dtype.items():\n pdf[f] = pdf[f].astype(t, copy=False)\n\n if timezone is None:\n return pdf\n else:\n from pyspark.sql.types import _check_series_convert_timestamps_local_tz\n for field in self.schema:\n # TODO: handle nested timestamps, such as ArrayType(TimestampType())?\n if isinstance(field.dataType, TimestampType):\n pdf[field.name] = \\\n _check_series_convert_timestamps_local_tz(pdf[field.name], timezone)\n return pdf\n\n def mapInPandas(self, udf):\n \"\"\"\n Maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined\n function and returns the result as a :class:`DataFrame`.\n\n The user-defined function should take an iterator of `pandas.DataFrame`\\\\s and return\n another iterator of `pandas.DataFrame`\\\\s. All columns are passed\n together as an iterator of `pandas.DataFrame`\\\\s to the user-defined function and the\n returned iterator of `pandas.DataFrame`\\\\s are combined as a :class:`DataFrame`.\n Each `pandas.DataFrame` size can be controlled by\n `spark.sql.execution.arrow.maxRecordsPerBatch`.\n Its schema must match the returnType of the Pandas user-defined function.\n\n :param udf: A function object returned by :meth:`pyspark.sql.functions.pandas_udf`\n\n >>> from pyspark.sql.functions import pandas_udf, PandasUDFType\n >>> df = spark.createDataFrame([(1, 21), (2, 30)],\n ... (\"id\", \"age\")) # doctest: +SKIP\n >>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP\n ... def filter_func(batch_iter):\n ... for pdf in batch_iter:\n ... yield pdf[pdf.id == 1]\n >>> df.mapInPandas(filter_func).show() # doctest: +SKIP\n +---+---+\n | id|age|\n +---+---+\n | 1| 21|\n +---+---+\n\n .. seealso:: :meth:`pyspark.sql.functions.pandas_udf`\n\n \"\"\"\n # Columns are special because hasattr always return True\n if isinstance(udf, Column) or not hasattr(udf, 'func') \\\n or udf.evalType != PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:\n raise ValueError(\"Invalid udf: the udf argument must be a pandas_udf of type \"\n \"MAP_ITER.\")\n\n udf_column = udf(*[self[col] for col in self.columns])\n jdf = self._jdf.mapInPandas(udf_column._jc.expr())\n return DataFrame(jdf, self.sql_ctx)\n\n def _collectAsArrow(self):\n \"\"\"\n Returns all records as a list of ArrowRecordBatches, pyarrow must be installed\n and available on driver and worker Python environments.\n\n .. 
note:: Experimental.\n \"\"\"\n with SCCallSiteSync(self._sc) as css:\n port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()\n\n # Collect list of un-ordered batches where last element is a list of correct order indices\n try:\n results = list(_load_from_socket((port, auth_secret), ArrowCollectSerializer()))\n finally:\n # Join serving thread and raise any exceptions from collectAsArrowToPython\n jsocket_auth_server.getResult()\n\n # Separate RecordBatches from batch order indices in results\n batches = results[:-1]\n batch_order = results[-1]\n\n # Re-order the batch list using the correct order\n return [batches[i] for i in batch_order]\n\n ##########################################################################################\n # Pandas compatibility\n ##########################################################################################\n\n groupby = copy_func(\n groupBy,\n sinceversion=1.4,\n doc=\":func:`groupby` is an alias for :func:`groupBy`.\")\n\n drop_duplicates = copy_func(\n dropDuplicates,\n sinceversion=1.4,\n doc=\":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.\")\n\n where = copy_func(\n filter,\n sinceversion=1.3,\n doc=\":func:`where` is an alias for :func:`filter`.\")\n\n\ndef _to_scala_map(sc, jm):\n \"\"\"\n Convert a dict into a JVM Map.\n \"\"\"\n return sc._jvm.PythonUtils.toScalaMap(jm)\n\n\ndef _to_corrected_pandas_type(dt):\n \"\"\"\n When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.\n This method gets the corrected data type for Pandas if that type may be inferred uncorrectly.\n \"\"\"\n import numpy as np\n if type(dt) == ByteType:\n return np.int8\n elif type(dt) == ShortType:\n return np.int16\n elif type(dt) == IntegerType:\n return np.int32\n elif type(dt) == FloatType:\n return np.float32\n else:\n return None\n\n\nclass DataFrameNaFunctions(object):\n \"\"\"Functionality for working with missing data in :class:`DataFrame`.\n\n .. versionadded:: 1.4\n \"\"\"\n\n def __init__(self, df):\n self.df = df\n\n def drop(self, how='any', thresh=None, subset=None):\n return self.df.dropna(how=how, thresh=thresh, subset=subset)\n\n drop.__doc__ = DataFrame.dropna.__doc__\n\n def fill(self, value, subset=None):\n return self.df.fillna(value=value, subset=subset)\n\n fill.__doc__ = DataFrame.fillna.__doc__\n\n def replace(self, to_replace, value=_NoValue, subset=None):\n return self.df.replace(to_replace, value, subset)\n\n replace.__doc__ = DataFrame.replace.__doc__\n\n\nclass DataFrameStatFunctions(object):\n \"\"\"Functionality for statistic functions with :class:`DataFrame`.\n\n .. 
versionadded:: 1.4\n \"\"\"\n\n def __init__(self, df):\n self.df = df\n\n def approxQuantile(self, col, probabilities, relativeError):\n return self.df.approxQuantile(col, probabilities, relativeError)\n\n approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__\n\n def corr(self, col1, col2, method=None):\n return self.df.corr(col1, col2, method)\n\n corr.__doc__ = DataFrame.corr.__doc__\n\n def cov(self, col1, col2):\n return self.df.cov(col1, col2)\n\n cov.__doc__ = DataFrame.cov.__doc__\n\n def crosstab(self, col1, col2):\n return self.df.crosstab(col1, col2)\n\n crosstab.__doc__ = DataFrame.crosstab.__doc__\n\n def freqItems(self, cols, support=None):\n return self.df.freqItems(cols, support)\n\n freqItems.__doc__ = DataFrame.freqItems.__doc__\n\n def sampleBy(self, col, fractions, seed=None):\n return self.df.sampleBy(col, fractions, seed)\n\n sampleBy.__doc__ = DataFrame.sampleBy.__doc__\n\n\ndef _test():\n import doctest\n from pyspark.context import SparkContext\n from pyspark.sql import Row, SQLContext, SparkSession\n import pyspark.sql.dataframe\n from pyspark.sql.functions import from_unixtime\n globs = pyspark.sql.dataframe.__dict__.copy()\n sc = SparkContext('local[4]', 'PythonTest')\n globs['sc'] = sc\n globs['sqlContext'] = SQLContext(sc)\n globs['spark'] = SparkSession(sc)\n globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\\\n .toDF(StructType([StructField('age', IntegerType()),\n StructField('name', StringType())]))\n globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()\n globs['df3'] = sc.parallelize([Row(name='Alice', age=2),\n Row(name='Bob', age=5)]).toDF()\n globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),\n Row(name='Bob', age=5, height=None),\n Row(name='Tom', age=None, height=None),\n Row(name=None, age=None, height=None)]).toDF()\n globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),\n Row(name='Bob', spy=None, age=5),\n Row(name='Mallory', spy=True, age=None)]).toDF()\n globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),\n Row(name='Bob', time=1479442946)]).toDF()\n\n (failure_count, test_count) = doctest.testmod(\n pyspark.sql.dataframe, globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)\n globs['sc'].stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
simoncometto/PID
[ "b6ea1c4ff44f64cae6c0f90542bcda5de5599de1" ]
[ "PID.py" ]
[ "import numpy as np\nfrom time import time\nimport matplotlib.pyplot as plt\n\nclass pid:\n '''\n \"Dsicret PID controller\"\n '''\n\n def __init__(self,\n Kp=1.0, Ki=0, Kd=0,\n setpoint=0,\n output_limits=(None, None),\n integrate_methode = 'trapezoidal'):\n\n self.Kp, self.Ki, self.Kd = Kp, Ki, Kd\n self.setpoint = setpoint\n self._min_output, self._max_output = output_limits\n self.integrate_methode = integrate_methode\n\n self.__last_t = time()\n self.__last_error = np.zeros(3, dtype=float)\n self.__last_I = 0\n return\n\n def __call__(self, measurment, dt=None):\n error = self.setpoint - measurment\n self.__last_error = np.roll(self.__last_error, -1)\n self.__last_error[-1] = error\n\n if dt == None:\n now = time()\n dt = now - self.__last_t\n self.__last_t = now\n\n #Termino Propocional -------------------------------------------\n P = error * self.Kp\n\n #Termino Integral ----------------------------------------------\n I = self.__last_I + self.Ki * self.__integrate(dt)\n self.__last_I = I\n #Termino Diferencial -------------------------------------------\n D = self.Kd * self.__differentiate(dt)\n\n output = P + I + D\n return output\n\n def set_parameters(self, Kp, Ki, Kd):\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n return\n\n def set_setpoint(self, setpoint):\n self.setpoint = setpoint\n return\n\n def __integrate(self, dt):\n if self.integrate_methode == 'rectangular':\n I = self.__last_error[-1] * dt\n elif self.integrate_methode == 'trapezoidal':\n I = dt * 0.5 * (self.__last_error[-2] + self.__last_error[-1])\n elif self.integrate_methode == 'simpson':\n #No implementado\n I = 0\n else:\n disp = 'No se reconoce a :' + self.integrate_methode + 'como un método de integración'\n raise ValueError(disp)\n return I\n\n def __differentiate(self, dt):\n D = (self.__last_error[-1] - self.__last_error[-2]) / dt\n return D\n\n\nif __name__ == '__main__':\n dt = 0.5\n t_step = 4 + dt\n t_end = 11\n\n t_init = np.arange(0, t_step, dt)\n t = np.arange(t_step, t_end,dt)\n\n x_init = np.zeros(len(t_init))\n x = np.ones(len(t))\n\n t = np.concatenate((t_init, t))\n x = np.concatenate((x_init, x))\n #x = np.zeros(len(t))\n #x[5] = 1\n y = np.empty(len(t))\n\n PID = pid(Kp=1, Ki=0.1, Kd=0.3)\n\n #print(x)\n for i in range(len(t)):\n PID.set_setpoint(x[i])\n y[i] = PID(0, dt=dt)\n\n plt.step(t,y) #t,x)\n plt.step(t,x)\n plt.legend(['PID','Setpoint'])\n #plt.savefig('PID 1,0.2,0.03 impulse.png', dpi=300)\n plt.show()\n\n '''\n y = np.empty(len(t))\n\n measurment = 0 #Lazo abierto\n\n PID = pid(Kp=1.0, Ki=0.2, Kd=0.005)\n\n f = np.arange(0,(100)*100,100)\n P = np.empty(100, dtype=float)\n\n for j in range(100):\n freq = j * 100\n x = np.sin(2 * np.pi * freq * t)\n\n for i in range(len(t)):\n PID.set_setpoint(x[i])\n y[i] = PID(measurment, dt=dt)\n\n P[j] = np.sqrt(np.mean(y**2))\n\n\n#plt.plot(t, y)\nprint(len(f))\nprint(len(P))\nplt.plot(f, P)\nplt.show()'''" ]
[ [ "numpy.concatenate", "numpy.zeros", "matplotlib.pyplot.step", "matplotlib.pyplot.legend", "numpy.roll", "numpy.arange", "matplotlib.pyplot.show" ] ]
silky/QuCumber
[ "f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f" ]
[ "tests/grads_utils.py" ]
[ "# Copyright 2018 PIQuIL - All Rights Reserved\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport torch\n\nfrom qucumber.utils import cplx\n\n\nclass PosGradsUtils:\n def __init__(self, nn_state):\n self.nn_state = nn_state\n\n def compute_numerical_kl(self, target_psi, vis, Z):\n KL = 0.0\n for i in range(len(vis)):\n KL += ((target_psi[i, 0]) ** 2) * ((target_psi[i, 0]) ** 2).log()\n KL -= ((target_psi[i, 0]) ** 2) * (\n self.nn_state.probability(vis[i], Z)\n ).log().item()\n return KL\n\n def compute_numerical_NLL(self, data, Z):\n NLL = 0\n batch_size = len(data)\n\n for i in range(batch_size):\n NLL -= self.nn_state.probability(data[i], Z).log().item() / float(\n batch_size\n )\n\n return NLL\n\n def algorithmic_gradKL(self, target_psi, vis, **kwargs):\n Z = self.nn_state.compute_normalization(vis)\n grad_KL = torch.zeros(\n self.nn_state.rbm_am.num_pars,\n dtype=torch.double,\n device=self.nn_state.device,\n )\n for i in range(len(vis)):\n grad_KL += ((target_psi[i, 0]) ** 2) * self.nn_state.gradient(vis[i])\n grad_KL -= self.nn_state.probability(vis[i], Z) * self.nn_state.gradient(\n vis[i]\n )\n return [grad_KL]\n\n def algorithmic_gradNLL(self, data, k, **kwargs):\n return self.nn_state.compute_batch_gradients(k, data, data)\n\n def numeric_gradKL(self, target_psi, param, vis, eps, **kwargs):\n num_gradKL = []\n for i in range(len(param)):\n param[i] += eps\n\n Z = self.nn_state.compute_normalization(vis)\n KL_p = self.compute_numerical_kl(target_psi, vis, Z)\n\n param[i] -= 2 * eps\n\n Z = self.nn_state.compute_normalization(vis)\n KL_m = self.compute_numerical_kl(target_psi, vis, Z)\n\n param[i] += eps\n\n num_gradKL.append((KL_p - KL_m) / (2 * eps))\n\n return torch.stack(num_gradKL).to(param)\n\n def numeric_gradNLL(self, param, data, vis, eps, **kwargs):\n num_gradNLL = []\n for i in range(len(param)):\n param[i] += eps\n\n Z = self.nn_state.compute_normalization(vis)\n NLL_p = self.compute_numerical_NLL(data, Z)\n\n param[i] -= 2 * eps\n\n Z = self.nn_state.compute_normalization(vis)\n NLL_m = self.compute_numerical_NLL(data, Z)\n\n param[i] += eps\n\n num_gradNLL.append((NLL_p - NLL_m) / (2 * eps))\n\n return torch.tensor(np.array(num_gradNLL), dtype=torch.double).to(param)\n\n\nclass ComplexGradsUtils:\n def __init__(self, nn_state):\n self.nn_state = nn_state\n\n def load_target_psi(self, bases, psi_data):\n psi_dict = {}\n D = int(len(psi_data) / float(len(bases)))\n\n for b in range(len(bases)):\n psi = torch.zeros(2, D, dtype=torch.double)\n psi_real = torch.tensor(\n psi_data[b * D : D * (b + 1), 0], dtype=torch.double\n )\n psi_imag = torch.tensor(\n psi_data[b * D : D * (b + 1), 1], dtype=torch.double\n )\n psi[0] = psi_real\n psi[1] = psi_imag\n psi_dict[bases[b]] = psi\n\n return psi_dict\n\n def 
transform_bases(self, bases_data):\n bases = []\n for i in range(len(bases_data)):\n tmp = \"\"\n for j in range(len(bases_data[i])):\n if bases_data[i][j] is not \" \":\n tmp += bases_data[i][j]\n bases.append(tmp)\n return bases\n\n def rotate_psi_full(self, basis, full_unitary_dict, psi):\n U = full_unitary_dict[basis]\n Upsi = cplx.matmul(U, psi)\n return Upsi\n\n def rotate_psi(self, basis, unitary_dict, vis):\n N = self.nn_state.num_visible\n v = torch.zeros(N, dtype=torch.double, device=self.nn_state.device)\n psi_r = torch.zeros(2, 1 << N, dtype=torch.double, device=self.nn_state.device)\n\n for x in range(1 << N):\n Upsi = torch.zeros(2, dtype=torch.double, device=self.nn_state.device)\n num_nontrivial_U = 0\n nontrivial_sites = []\n for j in range(N):\n if basis[j] is not \"Z\":\n num_nontrivial_U += 1\n nontrivial_sites.append(j)\n sub_state = self.nn_state.generate_hilbert_space(num_nontrivial_U)\n\n for xp in range(1 << num_nontrivial_U):\n cnt = 0\n for j in range(N):\n if basis[j] is not \"Z\":\n v[j] = sub_state[xp][cnt]\n cnt += 1\n else:\n v[j] = vis[x, j]\n\n U = torch.tensor(\n [1.0, 0.0], dtype=torch.double, device=self.nn_state.device\n )\n for ii in range(num_nontrivial_U):\n tmp = unitary_dict[basis[nontrivial_sites[ii]]]\n tmp = tmp[\n :,\n int(vis[x][nontrivial_sites[ii]]),\n int(v[nontrivial_sites[ii]]),\n ]\n U = cplx.scalar_mult(U, tmp)\n\n Upsi += cplx.scalar_mult(U, self.nn_state.psi(v))\n\n psi_r[:, x] = Upsi\n return psi_r\n\n def compute_numerical_NLL(self, data_samples, data_bases, Z, unitary_dict, vis):\n NLL = 0\n batch_size = len(data_samples)\n b_flag = 0\n for i in range(batch_size):\n bitstate = []\n for j in range(self.nn_state.num_visible):\n ind = 0\n if data_bases[i][j] != \"Z\":\n b_flag = 1\n bitstate.append(int(data_samples[i, j].item()))\n ind = int(\"\".join(str(i) for i in bitstate), 2)\n if b_flag == 0:\n NLL -= (\n self.nn_state.probability(data_samples[i], Z)\n ).log().item() / batch_size\n else:\n psi_r = self.rotate_psi(data_bases[i], unitary_dict, vis)\n NLL -= (\n cplx.norm_sqr(psi_r[:, ind]).log() - Z.log()\n ).item() / batch_size\n return NLL\n\n def compute_numerical_kl(self, psi_dict, vis, Z, unitary_dict, bases):\n N = self.nn_state.num_visible\n psi_r = torch.zeros(2, 1 << N, dtype=torch.double, device=self.nn_state.device)\n KL = 0.0\n for i in range(len(vis)):\n KL += (\n cplx.norm_sqr(psi_dict[bases[0]][:, i])\n * cplx.norm_sqr(psi_dict[bases[0]][:, i]).log()\n / float(len(bases))\n )\n KL -= (\n cplx.norm_sqr(psi_dict[bases[0]][:, i])\n * self.nn_state.probability(vis[i], Z).log().item()\n / float(len(bases))\n )\n\n for b in range(1, len(bases)):\n psi_r = self.rotate_psi(bases[b], unitary_dict, vis)\n for ii in range(len(vis)):\n if cplx.norm_sqr(psi_dict[bases[b]][:, ii]) > 0.0:\n KL += (\n cplx.norm_sqr(psi_dict[bases[b]][:, ii])\n * cplx.norm_sqr(psi_dict[bases[b]][:, ii]).log()\n / float(len(bases))\n )\n\n KL -= (\n cplx.norm_sqr(psi_dict[bases[b]][:, ii])\n * cplx.norm_sqr(psi_r[:, ii]).log()\n / float(len(bases))\n )\n KL += (\n cplx.norm_sqr(psi_dict[bases[b]][:, ii])\n * Z.log()\n / float(len(bases))\n )\n\n return KL\n\n def algorithmic_gradNLL(self, data_samples, data_bases, k, **kwargs):\n return self.nn_state.compute_batch_gradients(\n k, data_samples, data_samples, data_bases\n )\n\n def numeric_gradNLL(\n self, data_samples, data_bases, unitary_dict, param, vis, eps, **kwargs\n ):\n num_gradNLL = []\n for i in range(len(param)):\n param[i] += eps\n\n Z = self.nn_state.compute_normalization(vis)\n NLL_p = 
self.compute_numerical_NLL(\n data_samples, data_bases, Z, unitary_dict, vis\n )\n param[i] -= 2 * eps\n\n Z = self.nn_state.compute_normalization(vis)\n NLL_m = self.compute_numerical_NLL(\n data_samples, data_bases, Z, unitary_dict, vis\n )\n\n param[i] += eps\n\n num_gradNLL.append((NLL_p - NLL_m) / (2 * eps))\n\n return torch.tensor(np.array(num_gradNLL), dtype=torch.double).to(param)\n\n def numeric_gradKL(self, param, psi_dict, vis, unitary_dict, bases, eps, **kwargs):\n num_gradKL = []\n for i in range(len(param)):\n param[i] += eps\n\n Z = self.nn_state.compute_normalization(vis)\n KL_p = self.compute_numerical_kl(psi_dict, vis, Z, unitary_dict, bases)\n\n param[i] -= 2 * eps\n\n Z = self.nn_state.compute_normalization(vis)\n KL_m = self.compute_numerical_kl(psi_dict, vis, Z, unitary_dict, bases)\n param[i] += eps\n\n num_gradKL.append((KL_p - KL_m) / (2 * eps))\n\n return torch.stack(num_gradKL).to(param)\n\n def algorithmic_gradKL(self, psi_dict, vis, unitary_dict, bases, **kwargs):\n grad_KL = [\n torch.zeros(\n self.nn_state.rbm_am.num_pars,\n dtype=torch.double,\n device=self.nn_state.device,\n ),\n torch.zeros(\n self.nn_state.rbm_ph.num_pars,\n dtype=torch.double,\n device=self.nn_state.device,\n ),\n ]\n Z = self.nn_state.compute_normalization(vis).to(device=self.nn_state.device)\n\n for i in range(len(vis)):\n grad_KL[0] += (\n cplx.norm_sqr(psi_dict[bases[0]][:, i])\n * self.nn_state.rbm_am.effective_energy_gradient(vis[i])\n / float(len(bases))\n )\n grad_KL[0] -= (\n self.nn_state.probability(vis[i], Z)\n * self.nn_state.rbm_am.effective_energy_gradient(vis[i])\n / float(len(bases))\n )\n\n for b in range(1, len(bases)):\n for i in range(len(vis)):\n rotated_grad = self.nn_state.gradient(bases[b], vis[i])\n grad_KL[0] += (\n cplx.norm_sqr(psi_dict[bases[b]][:, i])\n * rotated_grad[0]\n / float(len(bases))\n )\n grad_KL[1] += (\n cplx.norm_sqr(psi_dict[bases[b]][:, i])\n * rotated_grad[1]\n / float(len(bases))\n )\n grad_KL[0] -= (\n self.nn_state.probability(vis[i], Z)\n * self.nn_state.rbm_am.effective_energy_gradient(vis[i])\n / float(len(bases))\n )\n return grad_KL\n" ]
[ [ "torch.zeros", "numpy.array", "torch.tensor", "torch.stack" ] ]
findepi/spark
[ "1a3f621c20a1be0670b59a692e0ea5297dd6202f" ]
[ "python/pyspark/pandas/indexes/base.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom functools import partial\nfrom typing import (\n Any,\n Callable,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n cast,\n no_type_check,\n TYPE_CHECKING,\n)\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nfrom pandas.api.types import ( # type: ignore[attr-defined]\n is_list_like,\n is_interval_dtype,\n is_bool_dtype,\n is_categorical_dtype,\n is_integer_dtype,\n is_float_dtype,\n is_numeric_dtype,\n is_object_dtype,\n)\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.api.types import CategoricalDtype, is_hashable # type: ignore[attr-defined]\nfrom pandas._libs import lib\n\nfrom pyspark.sql import functions as F, Column\nfrom pyspark.sql.types import (\n DayTimeIntervalType,\n FractionalType,\n IntegralType,\n TimestampType,\n TimestampNTZType,\n)\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Dtype, Label, Name, Scalar\nfrom pyspark.pandas.config import get_option, option_context\nfrom pyspark.pandas.base import IndexOpsMixin\nfrom pyspark.pandas.frame import DataFrame\nfrom pyspark.pandas.missing.indexes import MissingPandasLikeIndex\nfrom pyspark.pandas.series import Series, first_series\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.spark.accessors import SparkIndexMethods\nfrom pyspark.pandas.utils import (\n is_name_like_tuple,\n is_name_like_value,\n name_like_string,\n same_anchor,\n scol_for,\n verify_temp_column_name,\n validate_bool_kwarg,\n ERROR_MESSAGE_CANNOT_COMBINE,\n log_advice,\n)\nfrom pyspark.pandas.internal import (\n InternalField,\n InternalFrame,\n DEFAULT_SERIES_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n SPARK_INDEX_NAME_FORMAT,\n)\n\nif TYPE_CHECKING:\n from pyspark.pandas.spark.accessors import SparkIndexOpsMethods\n\n\nclass Index(IndexOpsMixin):\n \"\"\"\n pandas-on-Spark Index that corresponds to pandas Index logically. 
This might hold Spark Column\n internally.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : dtype, default None\n If dtype is None, we find the dtype that best fits the data.\n If an actual dtype is provided, we coerce to that dtype if it's safe.\n Otherwise, an error will be raised.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible.\n\n See Also\n --------\n MultiIndex : A multi-level, or hierarchical, Index.\n DatetimeIndex : Index of datetime64 data.\n Int64Index : A special case of :class:`Index` with purely integer labels.\n Float64Index : A special case of :class:`Index` with purely float labels.\n\n Examples\n --------\n >>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> ps.DataFrame({'a': [1, 2, 3]}, index=list('abc')).index\n Index(['a', 'b', 'c'], dtype='object')\n\n >>> ps.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> ps.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n\n From a Series:\n\n >>> s = ps.Series([1, 2, 3], index=[10, 20, 30])\n >>> ps.Index(s)\n Int64Index([1, 2, 3], dtype='int64')\n\n From an Index:\n\n >>> idx = ps.Index([1, 2, 3])\n >>> ps.Index(idx)\n Int64Index([1, 2, 3], dtype='int64')\n \"\"\"\n\n def __new__(\n cls,\n data: Optional[Any] = None,\n dtype: Optional[Union[str, Dtype]] = None,\n copy: bool = False,\n name: Optional[Name] = None,\n tupleize_cols: bool = True,\n **kwargs: Any,\n ) -> \"Index\":\n if not is_hashable(name):\n raise TypeError(\"Index.name must be a hashable type\")\n\n if isinstance(data, Series):\n if dtype is not None:\n data = data.astype(dtype)\n if name is not None:\n data = data.rename(name)\n\n internal = InternalFrame(\n spark_frame=data._internal.spark_frame,\n index_spark_columns=data._internal.data_spark_columns,\n index_names=data._internal.column_labels,\n index_fields=data._internal.data_fields,\n column_labels=[],\n data_spark_columns=[],\n data_fields=[],\n )\n return DataFrame(internal).index\n elif isinstance(data, Index):\n if copy:\n data = data.copy()\n if dtype is not None:\n data = data.astype(dtype)\n if name is not None:\n data = data.rename(name)\n return data\n\n return cast(\n Index,\n ps.from_pandas(\n pd.Index(\n data=data,\n dtype=dtype,\n copy=copy,\n name=name,\n tupleize_cols=tupleize_cols,\n **kwargs,\n )\n ),\n )\n\n @staticmethod\n def _new_instance(anchor: DataFrame) -> \"Index\":\n from pyspark.pandas.indexes.category import CategoricalIndex\n from pyspark.pandas.indexes.datetimes import DatetimeIndex\n from pyspark.pandas.indexes.multi import MultiIndex\n from pyspark.pandas.indexes.numeric import Float64Index, Int64Index\n from pyspark.pandas.indexes.timedelta import TimedeltaIndex\n\n instance: Index\n if anchor._internal.index_level > 1:\n instance = object.__new__(MultiIndex)\n elif isinstance(anchor._internal.index_fields[0].dtype, CategoricalDtype):\n instance = object.__new__(CategoricalIndex)\n elif isinstance(\n anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), IntegralType\n ):\n instance = object.__new__(Int64Index)\n elif isinstance(\n anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), FractionalType\n ):\n instance = object.__new__(Float64Index)\n elif isinstance(\n anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]),\n (TimestampType, TimestampNTZType),\n ):\n 
instance = object.__new__(DatetimeIndex)\n elif isinstance(\n anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]),\n DayTimeIntervalType,\n ):\n instance = object.__new__(TimedeltaIndex)\n else:\n instance = object.__new__(Index)\n\n instance._anchor = anchor # type: ignore[attr-defined]\n return instance\n\n @property\n def _psdf(self) -> DataFrame:\n return self._anchor\n\n @property\n def _internal(self) -> InternalFrame:\n internal = self._psdf._internal\n return internal.copy(\n column_labels=internal.index_names,\n data_spark_columns=internal.index_spark_columns,\n data_fields=internal.index_fields,\n column_label_names=None,\n )\n\n @property\n def _column_label(self) -> Optional[Label]:\n return self._psdf._internal.index_names[0]\n\n def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> \"Index\":\n \"\"\"\n Copy pandas-on-Spark Index with the new Spark Column.\n\n :param scol: the new Spark Column\n :return: the copied Index\n \"\"\"\n internal = self._internal.copy(\n index_spark_columns=[scol.alias(SPARK_DEFAULT_INDEX_NAME)],\n index_fields=[\n field\n if field is None or field.struct_field is None\n else field.copy(name=SPARK_DEFAULT_INDEX_NAME)\n ],\n column_labels=[],\n data_spark_columns=[],\n data_fields=[],\n )\n return DataFrame(internal).index\n\n spark: \"SparkIndexOpsMethods\" = CachedAccessor(\"spark\", SparkIndexMethods) # type: ignore[assignment]\n\n # This method is used via `DataFrame.info` API internally.\n def _summary(self, name: Optional[str] = None) -> str:\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n head, tail, total_count = tuple(\n cast(\n pd.DataFrame,\n self._internal.spark_frame.select(\n F.first(self.spark.column), F.last(self.spark.column), F.count(F.expr(\"*\"))\n ).toPandas(),\n ).iloc[0]\n )\n\n if total_count > 0:\n index_summary = \", %s to %s\" % (pprint_thing(head), pprint_thing(tail))\n else:\n index_summary = \"\"\n\n if name is None:\n name = type(self).__name__\n return \"%s: %s entries%s\" % (name, total_count, index_summary)\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'],\n ... 
index=list('abcd'))\n >>> df.index.size\n 4\n\n >>> df.set_index('dogs', append=True).index.size\n 4\n \"\"\"\n return len(self)\n\n @property\n def shape(self) -> tuple:\n \"\"\"\n Return a tuple of the shape of the underlying data.\n\n Examples\n --------\n >>> idx = ps.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.shape\n (3,)\n\n >>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> midx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y'),\n ('c', 'z')],\n )\n >>> midx.shape\n (3,)\n \"\"\"\n return (len(self._psdf),)\n\n def identical(self, other: \"Index\") -> bool:\n \"\"\"\n Similar to equals, but check that other comparable attributes are\n also equal.\n\n Returns\n -------\n bool\n If two Index objects have equal elements and same type True,\n otherwise False.\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import option_context\n >>> idx = ps.Index(['a', 'b', 'c'])\n >>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n\n For Index\n\n >>> idx.identical(idx)\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... idx.identical(ps.Index(['a', 'b', 'c']))\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... idx.identical(ps.Index(['b', 'b', 'a']))\n False\n >>> idx.identical(midx)\n False\n\n For MultiIndex\n\n >>> midx.identical(midx)\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... midx.identical(ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... midx.identical(ps.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))\n False\n >>> midx.identical(idx)\n False\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n self_name = self.names if isinstance(self, MultiIndex) else self.name\n other_name = other.names if isinstance(other, MultiIndex) else other.name\n\n return (\n self_name == other_name # to support non-index comparison by short-circuiting.\n and self.equals(other)\n )\n\n def equals(self, other: \"Index\") -> bool:\n \"\"\"\n Determine if two Index objects contain the same elements.\n\n Returns\n -------\n bool\n True if \"other\" is an Index and it has the same elements as calling\n index; False otherwise.\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import option_context\n >>> idx = ps.Index(['a', 'b', 'c'])\n >>> idx.name = \"name\"\n >>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> midx.names = (\"nameA\", \"nameB\")\n\n For Index\n\n >>> idx.equals(idx)\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... idx.equals(ps.Index(['a', 'b', 'c']))\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... idx.equals(ps.Index(['b', 'b', 'a']))\n False\n >>> idx.equals(midx)\n False\n\n For MultiIndex\n\n >>> midx.equals(midx)\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... midx.equals(ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))\n True\n >>> with option_context('compute.ops_on_diff_frames', True):\n ... 
midx.equals(ps.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))\n False\n >>> midx.equals(idx)\n False\n \"\"\"\n if same_anchor(self, other):\n return True\n elif type(self) == type(other):\n if get_option(\"compute.ops_on_diff_frames\"):\n # TODO: avoid using default index?\n with option_context(\"compute.default_index_type\", \"distributed-sequence\"):\n # Directly using Series from both self and other seems causing\n # some exceptions when 'compute.ops_on_diff_frames' is enabled.\n # Working around for now via using frame.\n return (\n cast(Series, self.to_series(\"self\").reset_index(drop=True))\n == cast(Series, other.to_series(\"other\").reset_index(drop=True))\n ).all()\n else:\n raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)\n else:\n return False\n\n def transpose(self) -> \"Index\":\n \"\"\"\n Return the transpose, For index, It will be index itself.\n\n Examples\n --------\n >>> idx = ps.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n\n >>> idx.transpose()\n Index(['a', 'b', 'c'], dtype='object')\n\n For MultiIndex\n\n >>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> midx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y'),\n ('c', 'z')],\n )\n\n >>> midx.transpose() # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y'),\n ('c', 'z')],\n )\n \"\"\"\n return self\n\n T = property(transpose)\n\n def _to_internal_pandas(self) -> pd.Index:\n \"\"\"\n Return a pandas Index directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n \"\"\"\n return self._psdf._internal.to_pandas_frame.index\n\n def to_pandas(self) -> pd.Index:\n \"\"\"\n Return a pandas Index.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'],\n ... index=list('abcd'))\n >>> df['dogs'].index.to_pandas()\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n log_advice(\n \"`to_pandas` loads all data into the driver's memory. \"\n \"It should only be used if the resulting pandas Index is expected to be small.\"\n )\n return self._to_pandas()\n\n def _to_pandas(self) -> pd.Index:\n \"\"\"\n Same as `to_pandas()`, without issueing the advice log for internal usage.\n \"\"\"\n return self._to_internal_pandas().copy()\n\n def to_numpy(self, dtype: Optional[Union[str, Dtype]] = None, copy: bool = False) -> np.ndarray:\n \"\"\"\n A NumPy ndarray representing the values in this Index or MultiIndex.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`\n copy : bool, default False\n Whether to ensure that the returned value is a not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> ps.Series([1, 2, 3, 4]).index.to_numpy()\n array([0, 1, 2, 3])\n >>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.to_numpy()\n array([(1, 4), (2, 5), (3, 6)], dtype=object)\n \"\"\"\n log_advice(\n \"`to_numpy` loads all data into the driver's memory. 
\"\n \"It should only be used if the resulting NumPy ndarray is expected to be small.\"\n )\n result = np.asarray(self._to_internal_pandas()._values, dtype=dtype) # type: ignore[arg-type,attr-defined]\n if copy:\n result = result.copy()\n return result\n\n def map(\n self, mapper: Union[dict, Callable[[Any], Any], pd.Series], na_action: Optional[str] = None\n ) -> \"Index\":\n \"\"\"\n Map values using input correspondence (a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or pd.Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If ‘ignore’, propagate NA values, without passing them to the mapping correspondence.\n\n Returns\n -------\n applied : Index, inferred\n The output of the mapping function applied to the index.\n\n Examples\n --------\n >>> psidx = ps.Index([1, 2, 3])\n\n >>> psidx.map({1: \"one\", 2: \"two\", 3: \"three\"})\n Index(['one', 'two', 'three'], dtype='object')\n\n >>> psidx.map(lambda id: \"{id} + 1\".format(id=id))\n Index(['1 + 1', '2 + 1', '3 + 1'], dtype='object')\n\n >>> pser = pd.Series([\"one\", \"two\", \"three\"], index=[1, 2, 3])\n >>> psidx.map(pser)\n Index(['one', 'two', 'three'], dtype='object')\n \"\"\"\n if isinstance(mapper, dict):\n if len(set(type(k) for k in mapper.values())) > 1:\n raise TypeError(\n \"If the mapper is a dictionary, its values must be of the same type\"\n )\n\n return Index(\n self.to_series().pandas_on_spark.transform_batch(\n lambda pser: pser.map(mapper, na_action)\n )\n ).rename(self.name)\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return an array representing the data in the Index.\n\n .. warning:: We recommend using `Index.to_numpy()` instead.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> ps.Series([1, 2, 3, 4]).index.values\n array([0, 1, 2, 3])\n >>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.values\n array([(1, 4), (2, 5), (3, 6)], dtype=object)\n \"\"\"\n warnings.warn(\"We recommend using `{}.to_numpy()` instead.\".format(type(self).__name__))\n return self.to_numpy()\n\n @property\n def asi8(self) -> np.ndarray:\n \"\"\"\n Integer representation of the values.\n\n .. warning:: We recommend using `Index.to_numpy()` instead.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n An ndarray with int64 dtype.\n\n Examples\n --------\n >>> ps.Index([1, 2, 3]).asi8\n array([1, 2, 3])\n\n Returns None for non-int64 dtype\n\n >>> ps.Index(['a', 'b', 'c']).asi8 is None\n True\n \"\"\"\n warnings.warn(\"We recommend using `{}.to_numpy()` instead.\".format(type(self).__name__))\n if isinstance(self.spark.data_type, IntegralType):\n return self.to_numpy()\n elif isinstance(self.spark.data_type, (TimestampType, TimestampNTZType)):\n return np.array(list(map(lambda x: x.astype(np.int64), self.to_numpy())))\n else:\n return None\n\n @property\n def has_duplicates(self) -> bool:\n \"\"\"\n If index has duplicates, return True, otherwise False.\n\n Examples\n --------\n >>> idx = ps.Index([1, 5, 7, 7])\n >>> idx.has_duplicates\n True\n\n >>> idx = ps.Index([1, 5, 7])\n >>> idx.has_duplicates\n False\n\n >>> idx = ps.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... 
\"Watermelon\"])\n >>> idx.has_duplicates\n True\n\n >>> idx = ps.Index([\"Orange\", \"Apple\",\n ... \"Watermelon\"])\n >>> idx.has_duplicates\n False\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column)\n scol = scol_for(sdf, sdf.columns[0])\n\n return sdf.select(F.count(scol) != F.countDistinct(scol)).first()[0]\n\n @property\n def is_unique(self) -> bool:\n \"\"\"\n Return if the index has unique values.\n\n Examples\n --------\n >>> idx = ps.Index([1, 5, 7, 7])\n >>> idx.is_unique\n False\n\n >>> idx = ps.Index([1, 5, 7])\n >>> idx.is_unique\n True\n\n >>> idx = ps.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"])\n >>> idx.is_unique\n False\n\n >>> idx = ps.Index([\"Orange\", \"Apple\",\n ... \"Watermelon\"])\n >>> idx.is_unique\n True\n \"\"\"\n return not self.has_duplicates\n\n @property\n def name(self) -> Name:\n \"\"\"Return name of the Index.\"\"\"\n return self.names[0]\n\n @name.setter\n def name(self, name: Name) -> None:\n self.names = [name]\n\n @property\n def names(self) -> List[Name]:\n \"\"\"Return names of the Index.\"\"\"\n return [\n name if name is None or len(name) > 1 else name[0]\n for name in self._internal.index_names\n ]\n\n @names.setter\n def names(self, names: List[Name]) -> None:\n if not is_list_like(names):\n raise ValueError(\"Names must be a list-like\")\n if self._internal.index_level != len(names):\n raise ValueError(\n \"Length of new names must be {}, got {}\".format(\n self._internal.index_level, len(names)\n )\n )\n if self._internal.index_level == 1:\n self.rename(names[0], inplace=True)\n else:\n self.rename(names, inplace=True)\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Number of levels in Index & MultiIndex.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\"a\": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name=\"idx\"))\n >>> psdf.index.nlevels\n 1\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')])\n >>> psdf.index.nlevels\n 2\n \"\"\"\n return self._internal.index_level\n\n def rename(self, name: Union[Name, List[Name]], inplace: bool = False) -> Optional[\"Index\"]:\n \"\"\"\n Alter Index or MultiIndex name.\n Able to set new names without level. 
Defaults to returning new index.\n\n Parameters\n ----------\n name : label or list of labels\n Name(s) to set.\n inplace : boolean, default False\n Modifies the object directly, instead of creating a new Index or MultiIndex.\n\n Returns\n -------\n Index or MultiIndex\n The same type as the caller or None if inplace is True.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': ['A', 'C'], 'b': ['A', 'B']}, columns=['a', 'b'])\n >>> df.index.rename(\"c\")\n Int64Index([0, 1], dtype='int64', name='c')\n\n >>> df.set_index(\"a\", inplace=True)\n >>> df.index.rename(\"d\")\n Index(['A', 'C'], dtype='object', name='d')\n\n You can also change the index name in place.\n\n >>> df.index.rename(\"e\", inplace=True)\n >>> df.index\n Index(['A', 'C'], dtype='object', name='e')\n\n >>> df # doctest: +NORMALIZE_WHITESPACE\n b\n e\n A A\n C B\n\n Support for MultiIndex\n\n >>> psidx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])\n >>> psidx.names = ['hello', 'pandas-on-Spark']\n >>> psidx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n names=['hello', 'pandas-on-Spark'])\n\n >>> psidx.rename(['aloha', 'databricks']) # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n names=['aloha', 'databricks'])\n \"\"\"\n names = self._verify_for_rename(name)\n\n internal = self._psdf._internal.copy(index_names=names)\n\n if inplace:\n self._psdf._update_internal_frame(internal)\n return None\n else:\n return DataFrame(internal).index\n\n def _verify_for_rename(self, name: Name) -> List[Label]:\n if is_hashable(name):\n if is_name_like_tuple(name):\n return [name]\n elif is_name_like_value(name):\n return [(name,)]\n raise TypeError(\"Index.name must be a hashable type\")\n\n # TODO: add downcast parameter for fillna function\n def fillna(self, value: Scalar) -> \"Index\":\n \"\"\"\n Fill NA/NaN values with the specified value.\n\n Parameters\n ----------\n value : scalar\n Scalar value to use to fill holes (example: 0). 
This value cannot be a list-likes.\n\n Returns\n -------\n Index :\n filled with value\n\n Examples\n --------\n >>> idx = ps.Index([1, 2, None])\n >>> idx\n Float64Index([1.0, 2.0, nan], dtype='float64')\n\n >>> idx.fillna(0)\n Float64Index([1.0, 2.0, 0.0], dtype='float64')\n \"\"\"\n if not isinstance(value, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(value).__name__)\n sdf = self._internal.spark_frame.fillna(value)\n\n internal = InternalFrame( # TODO: dtypes?\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n )\n return DataFrame(internal).index\n\n # TODO: ADD keep parameter\n def drop_duplicates(self) -> \"Index\":\n \"\"\"\n Return Index with duplicate values removed.\n\n Returns\n -------\n deduplicated : Index\n\n See Also\n --------\n Series.drop_duplicates : Equivalent method on Series.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n\n Examples\n --------\n Generate an pandas.Index with duplicate values.\n\n >>> idx = ps.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n >>> idx.drop_duplicates().sort_values()\n Index(['beetle', 'cow', 'hippo', 'lama'], dtype='object')\n \"\"\"\n sdf = self._internal.spark_frame.select(\n self._internal.index_spark_columns\n ).drop_duplicates()\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n return DataFrame(internal).index\n\n def to_series(self, name: Optional[Name] = None) -> Series:\n \"\"\"\n Create a Series with both index and values equal to the index keys\n useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n name : string, optional\n name of resulting Series. If None, defaults to name of original\n index\n\n Returns\n -------\n Series : dtype will be based on the type of the Index values.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'],\n ... 
index=list('abcd'))\n >>> df['dogs'].index.to_series()\n a a\n b b\n c c\n d d\n dtype: object\n \"\"\"\n if not is_hashable(name):\n raise TypeError(\"Series.name must be a hashable type\")\n scol = self.spark.column\n field = self._internal.data_fields[0]\n if name is not None:\n scol = scol.alias(name_like_string(name))\n field = field.copy(name=name_like_string(name))\n elif self._internal.index_level == 1:\n name = self.name\n column_labels: List[Optional[Label]] = [name if is_name_like_tuple(name) else (name,)]\n internal = self._internal.copy(\n column_labels=column_labels,\n data_spark_columns=[scol],\n data_fields=[field],\n column_label_names=None,\n )\n return first_series(DataFrame(internal))\n\n def to_frame(self, index: bool = True, name: Optional[Name] = None) -> DataFrame:\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n Parameters\n ----------\n index : boolean, default True\n Set the index of the returned DataFrame as the original Index.\n name : object, default None\n The passed name should substitute for the index name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = ps.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame() # doctest: +NORMALIZE_WHITESPACE\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_frame(name='zoo') # doctest: +NORMALIZE_WHITESPACE\n zoo\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n \"\"\"\n if name is None:\n if self._internal.index_names[0] is None:\n name = (DEFAULT_SERIES_NAME,)\n else:\n name = self._internal.index_names[0]\n elif not is_name_like_tuple(name):\n if is_name_like_value(name):\n name = (name,)\n else:\n raise TypeError(\"unhashable type: '{}'\".format(type(name).__name__))\n\n return self._to_frame(index=index, names=[name])\n\n def _to_frame(self, index: bool, names: List[Label]) -> DataFrame:\n if index:\n index_spark_columns = self._internal.index_spark_columns\n index_names = self._internal.index_names\n index_fields = self._internal.index_fields\n else:\n index_spark_columns = []\n index_names = []\n index_fields = []\n\n internal = InternalFrame(\n spark_frame=self._internal.spark_frame,\n index_spark_columns=index_spark_columns,\n index_names=index_names,\n index_fields=index_fields,\n column_labels=names,\n data_spark_columns=self._internal.index_spark_columns,\n data_fields=self._internal.index_fields,\n )\n return DataFrame(internal)\n\n def is_boolean(self) -> bool:\n \"\"\"\n Return if the current index type is a boolean type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[True]).index.is_boolean()\n True\n \"\"\"\n return is_bool_dtype(self.dtype)\n\n def is_categorical(self) -> bool:\n \"\"\"\n Return if the current index type is a categorical type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[1]).index.is_categorical()\n False\n \"\"\"\n return is_categorical_dtype(self.dtype)\n\n def is_floating(self) -> bool:\n \"\"\"\n Return if the current index type is a floating type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[1]).index.is_floating()\n False\n \"\"\"\n return is_float_dtype(self.dtype)\n\n def 
is_integer(self) -> bool:\n \"\"\"\n Return if the current index type is a integer type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[1]).index.is_integer()\n True\n \"\"\"\n return is_integer_dtype(self.dtype)\n\n def is_interval(self) -> bool:\n \"\"\"\n Return if the current index type is an interval type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[1]).index.is_interval()\n False\n \"\"\"\n return is_interval_dtype(self.dtype)\n\n def is_numeric(self) -> bool:\n \"\"\"\n Return if the current index type is a numeric type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[1]).index.is_numeric()\n True\n \"\"\"\n return is_numeric_dtype(self.dtype)\n\n def is_object(self) -> bool:\n \"\"\"\n Return if the current index type is a object type.\n\n Examples\n --------\n >>> ps.DataFrame({'a': [1]}, index=[\"a\"]).index.is_object()\n True\n \"\"\"\n return is_object_dtype(self.dtype)\n\n def is_type_compatible(self, kind: str) -> bool:\n \"\"\"\n Whether the index type is compatible with the provided type.\n\n Examples\n --------\n >>> psidx = ps.Index([1, 2, 3])\n >>> psidx.is_type_compatible('integer')\n True\n\n >>> psidx = ps.Index([1.0, 2.0, 3.0])\n >>> psidx.is_type_compatible('integer')\n False\n >>> psidx.is_type_compatible('floating')\n True\n \"\"\"\n return kind == self.inferred_type\n\n def dropna(self) -> \"Index\":\n \"\"\"\n Return Index or MultiIndex without NA/NaN values\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n NaN 7 8\n\n >>> df.index.dropna()\n Index(['cobra', 'viper'], dtype='object')\n\n Also support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... [None, 'weight', 'length']],\n ... [[0, 1, 1, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 1, 0, 1, 2, 1, 1, 2]])\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, None],\n ... 
index=midx)\n >>> s\n lama NaN 45.0\n cow weight 200.0\n weight 1.2\n NaN 30.0\n weight 250.0\n length 1.5\n falcon weight 320.0\n weight 1.0\n length NaN\n dtype: float64\n\n >>> s.index.dropna() # doctest: +SKIP\n MultiIndex([( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'weight'),\n ('falcon', 'weight'),\n ('falcon', 'length')],\n )\n \"\"\"\n sdf = self._internal.spark_frame.select(self._internal.index_spark_columns).dropna()\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n return DataFrame(internal).index\n\n def unique(self, level: Optional[Union[int, Name]] = None) -> \"Index\":\n \"\"\"\n Return unique values in the index.\n\n Be aware the order of unique values might be different than pandas.Index.unique\n\n Parameters\n ----------\n level : int or str, optional, default is None\n\n Returns\n -------\n Index without duplicates\n\n See Also\n --------\n Series.unique\n groupby.SeriesGroupBy.unique\n\n Examples\n --------\n >>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 1, 3]).index.unique().sort_values()\n Int64Index([1, 3], dtype='int64')\n\n >>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=['d', 'e', 'e']).index.unique().sort_values()\n Index(['d', 'e'], dtype='object')\n\n MultiIndex\n\n >>> ps.MultiIndex.from_tuples([(\"A\", \"X\"), (\"A\", \"Y\"), (\"A\", \"X\")]).unique()\n ... # doctest: +SKIP\n MultiIndex([('A', 'X'),\n ('A', 'Y')],\n )\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n scols = self._internal.index_spark_columns\n sdf = self._psdf._internal.spark_frame.select(scols).distinct()\n return DataFrame(\n InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n ).index\n\n # TODO: add error parameter\n def drop(self, labels: List[Any]) -> \"Index\":\n \"\"\"\n Make new Index with passed list of labels deleted.\n\n Parameters\n ----------\n labels : array-like\n\n Returns\n -------\n dropped : Index\n\n Examples\n --------\n >>> index = ps.Index([1, 2, 3])\n >>> index\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> index.drop([1])\n Int64Index([2, 3], dtype='int64')\n \"\"\"\n internal = self._internal.resolved_copy\n sdf = internal.spark_frame[~internal.index_spark_columns[0].isin(labels)]\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n column_labels=[],\n data_spark_columns=[],\n data_fields=[],\n )\n return DataFrame(internal).index\n\n def _validate_index_level(self, level: Union[int, Name]) -> None:\n \"\"\"\n Validate index level.\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\n \"Too many levels: Index has only 1 level,\"\n \" %d is not a valid level number\" % (level,)\n )\n elif level > 0:\n raise IndexError(\"Too many levels:\" \" Index has only 1 level, not %d\" % (level + 1))\n elif level != self.name:\n raise KeyError(\n \"Requested level ({}) does not match index name 
({})\".format(level, self.name)\n )\n\n def get_level_values(self, level: Union[int, Name]) -> \"Index\":\n \"\"\"\n Return Index if a valid level is given.\n\n Examples:\n --------\n >>> psidx = ps.Index(['a', 'b', 'c'], name='ks')\n >>> psidx.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object', name='ks')\n\n >>> psidx.get_level_values('ks')\n Index(['a', 'b', 'c'], dtype='object', name='ks')\n \"\"\"\n self._validate_index_level(level)\n return self\n\n def copy(self, name: Optional[Name] = None, deep: Optional[bool] = None) -> \"Index\":\n \"\"\"\n Make a copy of this object. name sets those attributes on the new object.\n\n Parameters\n ----------\n name : string, optional\n to set name of index\n deep : None\n this parameter is not supported but just dummy parameter to match pandas.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n >>> df.index\n Index(['cobra', 'viper', 'sidewinder'], dtype='object')\n\n Copy index\n\n >>> df.index.copy()\n Index(['cobra', 'viper', 'sidewinder'], dtype='object')\n\n Copy index with name\n\n >>> df.index.copy(name='snake')\n Index(['cobra', 'viper', 'sidewinder'], dtype='object', name='snake')\n \"\"\"\n result = self._psdf[[]].index\n if name:\n result.name = name\n return result\n\n def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> \"Index\":\n \"\"\"\n Return index with requested level(s) removed.\n If resulting index has only 1 level left, the result will be\n of Index type, not MultiIndex.\n\n Parameters\n ----------\n level : int, str, tuple, or list-like, default 0\n If a string is given, must be the name of a level\n If list-like, elements must be names or indexes of levels.\n\n Returns\n -------\n Index or MultiIndex\n\n Examples\n --------\n >>> midx = ps.DataFrame({'a': ['a', 'b']}, index=[['a', 'x'], ['b', 'y'], [1, 2]]).index\n >>> midx # doctest: +SKIP\n MultiIndex([('a', 'b', 1),\n ('x', 'y', 2)],\n )\n >>> midx.droplevel([0, 1]) # doctest: +SKIP\n Int64Index([1, 2], dtype='int64')\n >>> midx.droplevel(0) # doctest: +SKIP\n MultiIndex([('b', 1),\n ('y', 2)],\n )\n >>> midx.names = [(\"a\", \"b\"), \"b\", \"c\"]\n >>> midx.droplevel([('a', 'b')]) # doctest: +SKIP\n MultiIndex([('b', 1),\n ('y', 2)],\n names=['b', 'c'])\n \"\"\"\n names = self.names\n nlevels = self.nlevels\n if not is_list_like(level):\n levels = [cast(Union[int, Name], level)]\n else:\n levels = cast(List[Union[int, Name]], level)\n\n int_level = set()\n for n in levels:\n if isinstance(n, int):\n if n < 0:\n n = n + nlevels\n if n < 0:\n raise IndexError(\n \"Too many levels: Index has only {} levels, \"\n \"{} is not a valid level number\".format(nlevels, (n - nlevels))\n )\n if n >= nlevels:\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(nlevels, n + 1)\n )\n else:\n if n not in names:\n raise KeyError(\"Level {} not found\".format(n))\n n = names.index(n)\n int_level.add(n)\n\n if len(levels) >= nlevels:\n raise ValueError(\n \"Cannot remove {} levels from an index with {} \"\n \"levels: at least one level must be \"\n \"left.\".format(len(levels), nlevels)\n )\n\n index_spark_columns, index_names, index_fields = zip(\n *[\n item\n for i, item in enumerate(\n zip(\n self._internal.index_spark_columns,\n self._internal.index_names,\n self._internal.index_fields,\n )\n )\n if i not in int_level\n ]\n )\n\n internal = 
self._internal.copy(\n index_spark_columns=list(index_spark_columns),\n index_names=list(index_names),\n index_fields=list(index_fields),\n column_labels=[],\n data_spark_columns=[],\n data_fields=[],\n )\n return DataFrame(internal).index\n\n def symmetric_difference(\n self,\n other: \"Index\",\n result_name: Optional[Name] = None,\n sort: Optional[bool] = None,\n ) -> \"Index\":\n \"\"\"\n Compute the symmetric difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n sort : True or None, default None\n Whether to sort the resulting index.\n * True : Attempt to sort the result.\n * None : Do not sort the result.\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by\n ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n dropped.\n\n Examples\n --------\n >>> s1 = ps.Series([1, 2, 3, 4], index=[1, 2, 3, 4])\n >>> s2 = ps.Series([1, 2, 3, 4], index=[2, 3, 4, 5])\n\n >>> s1.index.symmetric_difference(s2.index) # doctest: +SKIP\n Int64Index([5, 1], dtype='int64')\n\n You can set name of result Index.\n\n >>> s1.index.symmetric_difference(s2.index, result_name='pandas-on-Spark') # doctest: +SKIP\n Int64Index([5, 1], dtype='int64', name='pandas-on-Spark')\n\n You can set sort to `True`, if you want to sort the resulting index.\n\n >>> s1.index.symmetric_difference(s2.index, sort=True)\n Int64Index([1, 5], dtype='int64')\n\n You can also use the ``^`` operator:\n\n >>> s1.index ^ s2.index # doctest: +SKIP\n Int64Index([5, 1], dtype='int64')\n \"\"\"\n if type(self) != type(other):\n raise NotImplementedError(\n \"Doesn't support symmetric_difference between Index & MultiIndex for now\"\n )\n\n sdf_self = self._psdf._internal.spark_frame.select(self._internal.index_spark_columns)\n sdf_other = other._psdf._internal.spark_frame.select(other._internal.index_spark_columns)\n\n sdf_symdiff = sdf_self.union(sdf_other).subtract(sdf_self.intersect(sdf_other))\n\n if sort:\n sdf_symdiff = sdf_symdiff.sort(*self._internal.index_spark_column_names)\n\n internal = InternalFrame(\n spark_frame=sdf_symdiff,\n index_spark_columns=[\n scol_for(sdf_symdiff, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n result = DataFrame(internal).index\n\n if result_name:\n result.name = result_name\n\n return result\n\n # TODO: return_indexer\n def sort_values(self, ascending: bool = True) -> \"Index\":\n \"\"\"\n Return a sorted copy of the index.\n\n .. 
note:: This method is not supported for pandas when index has NaN value.\n pandas raises unexpected TypeError, but we support treating NaN\n as the smallest value.\n\n Parameters\n ----------\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n\n Returns\n -------\n sorted_index : ps.Index or ps.MultiIndex\n Sorted copy of the index.\n\n See Also\n --------\n Series.sort_values : Sort values of a Series.\n DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = ps.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order.\n\n >>> idx.sort_values(ascending=False)\n Int64Index([1000, 100, 10, 1], dtype='int64')\n\n Support for MultiIndex.\n\n >>> psidx = ps.MultiIndex.from_tuples([('a', 'x', 1), ('c', 'y', 2), ('b', 'z', 3)])\n >>> psidx # doctest: +SKIP\n MultiIndex([('a', 'x', 1),\n ('c', 'y', 2),\n ('b', 'z', 3)],\n )\n\n >>> psidx.sort_values() # doctest: +SKIP\n MultiIndex([('a', 'x', 1),\n ('b', 'z', 3),\n ('c', 'y', 2)],\n )\n\n >>> psidx.sort_values(ascending=False) # doctest: +SKIP\n MultiIndex([('c', 'y', 2),\n ('b', 'z', 3),\n ('a', 'x', 1)],\n )\n \"\"\"\n sdf = self._internal.spark_frame\n sdf = sdf.orderBy(*self._internal.index_spark_columns, ascending=ascending).select(\n self._internal.index_spark_columns\n )\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n return DataFrame(internal).index\n\n @no_type_check\n def sort(self, *args, **kwargs) -> None:\n \"\"\"\n Use sort_values instead.\n \"\"\"\n raise TypeError(\"cannot sort an Index object in-place, use sort_values instead\")\n\n def min(self) -> Union[Scalar, Tuple[Scalar, ...]]:\n \"\"\"\n Return the minimum value of the Index.\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = ps.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = ps.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = ps.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])\n >>> idx.min()\n ('a', 'x', 1)\n \"\"\"\n sdf = self._internal.spark_frame\n min_row = cast(\n pd.DataFrame,\n sdf.select(F.min(F.struct(*self._internal.index_spark_columns)).alias(\"min_row\"))\n .select(\"min_row.*\")\n .toPandas(),\n )\n result = tuple(min_row.iloc[0])\n\n return result if len(result) > 1 else result[0]\n\n def max(self) -> Union[Scalar, Tuple[Scalar, ...]]:\n \"\"\"\n Return the maximum value of the Index.\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = ps.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = ps.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = ps.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])\n >>> 
idx.max()\n ('b', 'y', 2)\n \"\"\"\n sdf = self._internal.spark_frame\n max_row = cast(\n pd.DataFrame,\n sdf.select(F.max(F.struct(*self._internal.index_spark_columns)).alias(\"max_row\"))\n .select(\"max_row.*\")\n .toPandas(),\n )\n result = tuple(max_row.iloc[0])\n\n return result if len(result) > 1 else result[0]\n\n def delete(self, loc: Union[int, List[int]]) -> \"Index\":\n \"\"\"\n Make new Index with passed location(-s) deleted.\n\n .. note:: this API can be pretty expensive since it is based on\n a global sequence internally.\n\n Returns\n -------\n new_index : Index\n\n Examples\n --------\n >>> psidx = ps.Index([10, 10, 9, 8, 4, 2, 4, 4, 2, 2, 10, 10])\n >>> psidx\n Int64Index([10, 10, 9, 8, 4, 2, 4, 4, 2, 2, 10, 10], dtype='int64')\n\n >>> psidx.delete(0).sort_values()\n Int64Index([2, 2, 2, 4, 4, 4, 8, 9, 10, 10, 10], dtype='int64')\n\n >>> psidx.delete([0, 1, 2, 3, 10, 11]).sort_values()\n Int64Index([2, 2, 2, 4, 4, 4], dtype='int64')\n\n MultiIndex\n\n >>> psidx = ps.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])\n >>> psidx # doctest: +SKIP\n MultiIndex([('a', 'x', 1),\n ('b', 'y', 2),\n ('c', 'z', 3)],\n )\n\n >>> psidx.delete([0, 2]).sort_values() # doctest: +SKIP\n MultiIndex([('b', 'y', 2)],\n )\n \"\"\"\n length = len(self)\n\n def is_len_exceeded(index: int) -> bool:\n \"\"\"Check if the given index is exceeded the length or not\"\"\"\n return index >= length if index >= 0 else abs(index) > length\n\n if not is_list_like(loc):\n if is_len_exceeded(cast(int, loc)):\n raise IndexError(\n \"index {} is out of bounds for axis 0 with size {}\".format(loc, length)\n )\n locs = [cast(int, loc)]\n else:\n for index in cast(List[int], loc):\n if is_len_exceeded(index):\n raise IndexError(\n \"index {} is out of bounds for axis 0 with size {}\".format(index, length)\n )\n locs = cast(List[int], loc)\n\n locs = [int(item) for item in locs]\n locs = [item if item >= 0 else length + item for item in locs]\n\n # we need a temporary column such as '__index_value_0__'\n # since 'InternalFrame.attach_default_index' will be failed\n # when self._scol has name of '__index_level_0__'\n index_value_column_format = \"__index_value_{}__\"\n\n sdf = self._internal._sdf\n index_value_column_names = [\n verify_temp_column_name(sdf, index_value_column_format.format(i))\n for i in range(self._internal.index_level)\n ]\n index_value_columns = [\n index_scol.alias(index_vcol_name)\n for index_scol, index_vcol_name in zip(\n self._internal.index_spark_columns, index_value_column_names\n )\n ]\n sdf = sdf.select(index_value_columns)\n\n sdf = InternalFrame.attach_default_index(sdf, default_index_type=\"distributed-sequence\")\n # sdf here looks as below\n # +-----------------+-----------------+-----------------+-----------------+\n # |__index_level_0__|__index_value_0__|__index_value_1__|__index_value_2__|\n # +-----------------+-----------------+-----------------+-----------------+\n # | 0| a| x| 1|\n # | 1| b| y| 2|\n # | 2| c| z| 3|\n # +-----------------+-----------------+-----------------+-----------------+\n\n # delete rows which are matched with given `loc`\n sdf = sdf.where(~F.col(SPARK_INDEX_NAME_FORMAT(0)).isin(locs))\n sdf = sdf.select(index_value_column_names)\n # sdf here looks as below, we should alias them back to origin spark column names\n # +-----------------+-----------------+-----------------+\n # |__index_value_0__|__index_value_1__|__index_value_2__|\n # +-----------------+-----------------+-----------------+\n # | c| z| 3|\n # 
+-----------------+-----------------+-----------------+\n index_origin_columns = [\n F.col(index_vcol_name).alias(index_scol_name)\n for index_vcol_name, index_scol_name in zip(\n index_value_column_names, self._internal.index_spark_column_names\n )\n ]\n sdf = sdf.select(index_origin_columns)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n\n return DataFrame(internal).index\n\n def append(self, other: \"Index\") -> \"Index\":\n \"\"\"\n Append a collection of Index options together.\n\n Parameters\n ----------\n other : Index\n\n Returns\n -------\n appended : Index\n\n Examples\n --------\n >>> psidx = ps.Index([10, 5, 0, 5, 10, 5, 0, 10])\n >>> psidx\n Int64Index([10, 5, 0, 5, 10, 5, 0, 10], dtype='int64')\n\n >>> psidx.append(psidx)\n Int64Index([10, 5, 0, 5, 10, 5, 0, 10, 10, 5, 0, 5, 10, 5, 0, 10], dtype='int64')\n\n Support for MiltiIndex\n\n >>> psidx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])\n >>> psidx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n )\n\n >>> psidx.append(psidx) # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y'),\n ('a', 'x'),\n ('b', 'y')],\n )\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n if isinstance(self, MultiIndex) != isinstance(other, MultiIndex):\n raise NotImplementedError(\n \"append() between Index & MultiIndex is currently not supported\"\n )\n if self._internal.index_level != other._internal.index_level:\n raise NotImplementedError(\n \"append() between MultiIndexs with different levels is currently not supported\"\n )\n\n index_fields = self._index_fields_for_union_like(other, func_name=\"append\")\n\n sdf_self = self._internal.spark_frame.select(self._internal.index_spark_columns)\n sdf_other = other._internal.spark_frame.select(other._internal.index_spark_columns)\n sdf_appended = sdf_self.union(sdf_other)\n\n # names should be kept when MultiIndex, but Index wouldn't keep its name.\n if isinstance(self, MultiIndex):\n index_names = self._internal.index_names\n else:\n index_names = None\n\n internal = InternalFrame(\n spark_frame=sdf_appended,\n index_spark_columns=[\n scol_for(sdf_appended, col) for col in self._internal.index_spark_column_names\n ],\n index_names=index_names,\n index_fields=index_fields,\n )\n\n return DataFrame(internal).index\n\n def argmax(self) -> int:\n \"\"\"\n Return a maximum argument indexer.\n\n Parameters\n ----------\n skipna : bool, default True\n\n Returns\n -------\n maximum argument indexer\n\n Examples\n --------\n >>> psidx = ps.Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3])\n >>> psidx\n Int64Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3], dtype='int64')\n\n >>> psidx.argmax()\n 4\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column)\n sequence_col = verify_temp_column_name(sdf, \"__distributed_sequence_column__\")\n sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name=sequence_col)\n # spark_frame here looks like below\n # +-----------------+---------------+\n # |__index_level_0__|__index_value__|\n # +-----------------+---------------+\n # | 0| 10|\n # | 4| 100|\n # | 2| 8|\n # | 3| 7|\n # | 6| 4|\n # | 5| 5|\n # | 7| 3|\n # | 8| 100|\n # | 1| 9|\n # +-----------------+---------------+\n\n return (\n sdf.orderBy(\n scol_for(sdf, self._internal.data_spark_column_names[0]).desc(),\n F.col(sequence_col).asc(),\n )\n .select(sequence_col)\n 
.first()[0]\n )\n\n def argmin(self) -> int:\n \"\"\"\n Return a minimum argument indexer.\n\n Parameters\n ----------\n skipna : bool, default True\n\n Returns\n -------\n minimum argument indexer\n\n Examples\n --------\n >>> psidx = ps.Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3])\n >>> psidx\n Int64Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3], dtype='int64')\n\n >>> psidx.argmin()\n 7\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column)\n sequence_col = verify_temp_column_name(sdf, \"__distributed_sequence_column__\")\n sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name=sequence_col)\n\n return (\n sdf.orderBy(\n scol_for(sdf, self._internal.data_spark_column_names[0]).asc(),\n F.col(sequence_col).asc(),\n )\n .select(sequence_col)\n .first()[0]\n )\n\n def set_names(\n self,\n names: Union[Name, List[Name]],\n level: Optional[Union[int, Name, List[Union[int, Name]]]] = None,\n inplace: bool = False,\n ) -> Optional[\"Index\"]:\n \"\"\"\n Set Index or MultiIndex name.\n Able to set new names partially and by level.\n\n Parameters\n ----------\n names : label or list of label\n Name(s) to set.\n level : int, label or list of int or label, optional\n If the index is a MultiIndex, level(s) to set (None for all\n levels). Otherwise level must be None.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index\n The same type as the caller or None if inplace is True.\n\n See Also\n --------\n Index.rename : Able to set new names without level.\n\n Examples\n --------\n >>> idx = ps.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n\n >>> idx.set_names('quarter')\n Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')\n\n For MultiIndex\n\n >>> idx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])\n >>> idx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n )\n\n >>> idx.set_names(['kind', 'year'], inplace=True)\n >>> idx # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n names=['kind', 'year'])\n\n >>> idx.set_names('species', level=0) # doctest: +SKIP\n MultiIndex([('a', 'x'),\n ('b', 'y')],\n names=['species', 'year'])\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n if isinstance(self, MultiIndex):\n if level is not None:\n self_names = self.names\n self_names[level] = names # type: ignore[index]\n names = self_names\n return self.rename(name=names, inplace=inplace)\n\n def difference(self, other: \"Index\", sort: Optional[bool] = None) -> \"Index\":\n \"\"\"\n Return a new Index with elements from the index that are not in\n `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : True or None, default None\n Whether to sort the resulting index.\n * True : Attempt to sort the result.\n * None : Do not sort the result.\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n\n >>> idx1 = ps.Index([2, 1, 3, 4])\n >>> idx2 = ps.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2, sort=True)\n Int64Index([1, 2], dtype='int64')\n\n MultiIndex\n\n >>> midx1 = ps.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])\n >>> midx2 = ps.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'z', 2), ('k', 'z', 3)])\n >>> midx1.difference(midx2) # doctest: +SKIP\n MultiIndex([('b', 'y', 2),\n ('c', 'z', 3)],\n )\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n # Check if the `self` and `other` have different index 
types.\n # 1. `self` is Index, `other` is MultiIndex\n # 2. `self` is MultiIndex, `other` is Index\n is_index_types_different = isinstance(other, Index) and not isinstance(self, type(other))\n if is_index_types_different:\n if isinstance(self, MultiIndex):\n # In case `self` is MultiIndex and `other` is Index,\n # return MultiIndex without its names.\n return self.rename([None] * len(self))\n elif isinstance(self, Index):\n # In case `self` is Index and `other` is MultiIndex,\n # return Index without its name.\n return self.rename(None)\n\n if not isinstance(other, (Index, Series, tuple, list, set, dict)):\n raise TypeError(\"Input must be Index or array-like\")\n if not isinstance(sort, (type(None), type(True))):\n raise ValueError(\n \"The 'sort' keyword only takes the values of None or True; {} was passed.\".format(\n sort\n )\n )\n # Handling MultiIndex when `other` is not MultiIndex.\n if isinstance(self, MultiIndex) and not isinstance(other, MultiIndex):\n is_other_list_of_tuples = isinstance(other, (list, set, dict)) and all(\n [isinstance(item, tuple) for item in other]\n )\n if is_other_list_of_tuples:\n other = MultiIndex.from_tuples(other) # type: ignore[arg-type]\n else:\n raise TypeError(\"other must be a MultiIndex or a list of tuples\")\n\n if not isinstance(other, Index):\n other = Index(other)\n\n sdf_self = self._internal.spark_frame\n sdf_other = other._internal.spark_frame\n idx_self = self._internal.index_spark_columns\n idx_other = other._internal.index_spark_columns\n sdf_diff = sdf_self.select(idx_self).subtract(sdf_other.select(idx_other))\n internal = InternalFrame(\n spark_frame=sdf_diff,\n index_spark_columns=[\n scol_for(sdf_diff, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=self._internal.index_fields,\n )\n result = DataFrame(internal).index\n # Name(s) will be kept when only name(s) of (Multi)Index are the same.\n if isinstance(self, type(other)) and isinstance(self, MultiIndex):\n if self.names == other.names:\n result.names = self.names\n elif isinstance(self, type(other)) and not isinstance(self, MultiIndex):\n if self.name == other.name:\n result.name = self.name\n return result if sort is None else result.sort_values()\n\n @property\n def is_all_dates(self) -> bool:\n \"\"\"\n Return if all data types of the index are datetime.\n remember that since pandas-on-Spark does not support multiple data types in an index,\n so it returns True if any type of data is datetime.\n\n Examples\n --------\n >>> from datetime import datetime\n\n >>> idx = ps.Index([datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0)])\n >>> idx\n DatetimeIndex(['2019-01-01', '2019-02-03'], dtype='datetime64[ns]', freq=None)\n\n >>> idx.is_all_dates\n True\n\n >>> idx = ps.Index([datetime(2019, 1, 1, 0, 0, 0), None])\n >>> idx\n DatetimeIndex(['2019-01-01', 'NaT'], dtype='datetime64[ns]', freq=None)\n\n >>> idx.is_all_dates\n True\n\n >>> idx = ps.Index([0, 1, 2])\n >>> idx\n Int64Index([0, 1, 2], dtype='int64')\n\n >>> idx.is_all_dates\n False\n \"\"\"\n return isinstance(self.spark.data_type, (TimestampType, TimestampNTZType))\n\n def repeat(self, repeats: int) -> \"Index\":\n \"\"\"\n Repeat elements of a Index/MultiIndex.\n\n Returns a new Index/MultiIndex where each element of the current Index/MultiIndex\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int\n The number of repetitions for each element. This should be a\n non-negative integer. 
Repeating 0 times will return an empty\n Index.\n\n Returns\n -------\n repeated_index : Index/MultiIndex\n Newly created Index/MultiIndex with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n\n Examples\n --------\n >>> idx = ps.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')\n\n For MultiIndex,\n\n >>> midx = ps.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c')],\n )\n >>> midx.repeat(2) # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('x', 'a'),\n ('x', 'b'),\n ('y', 'c')],\n )\n >>> midx.repeat(0) # doctest: +SKIP\n MultiIndex([], )\n \"\"\"\n if not isinstance(repeats, int):\n raise TypeError(\n \"`repeats` argument must be integer, but got {}\".format(type(repeats).__name__)\n )\n elif repeats < 0:\n raise ValueError(\"negative dimensions are not allowed\")\n\n psdf: DataFrame = DataFrame(self._internal.resolved_copy)\n if repeats == 0:\n return DataFrame(psdf._internal.with_filter(SF.lit(False))).index\n else:\n return ps.concat([psdf] * repeats).index\n\n def asof(self, label: Any) -> Scalar:\n \"\"\"\n Return the label from the index, or, if not present, the previous one.\n\n Assuming that the index is sorted, return the passed index label if it\n is in the index, or return the previous index label if the passed one\n is not in the index.\n\n .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`\n which can be expensive.\n\n Parameters\n ----------\n label : object\n The label up to which the method returns the latest index label.\n\n Returns\n -------\n object\n The passed label if it is in the index. 
The previous label if the\n passed label is not in the sorted index or `NaN` if there is no\n such label.\n\n Examples\n --------\n `Index.asof` returns the latest index label up to the passed label.\n\n >>> idx = ps.Index(['2013-12-31', '2014-01-02', '2014-01-03'])\n >>> idx.asof('2014-01-01')\n '2013-12-31'\n\n If the label is in the index, the method returns the passed label.\n\n >>> idx.asof('2014-01-02')\n '2014-01-02'\n\n If all of the labels in the index are later than the passed label,\n NaN is returned.\n\n >>> idx.asof('1999-01-02')\n nan\n \"\"\"\n sdf = self._internal.spark_frame\n if self.is_monotonic_increasing:\n sdf = sdf.where(self.spark.column <= SF.lit(label).cast(self.spark.data_type)).select(\n F.max(self.spark.column)\n )\n elif self.is_monotonic_decreasing:\n sdf = sdf.where(self.spark.column >= SF.lit(label).cast(self.spark.data_type)).select(\n F.min(self.spark.column)\n )\n else:\n raise ValueError(\"index must be monotonic increasing or decreasing\")\n\n result = cast(pd.DataFrame, sdf.toPandas()).iloc[0, 0]\n return result if result is not None else np.nan\n\n def _index_fields_for_union_like(\n self: \"Index\", other: \"Index\", func_name: str\n ) -> Optional[List[InternalField]]:\n if self._internal.index_fields == other._internal.index_fields:\n return self._internal.index_fields\n elif all(\n left.dtype == right.dtype\n and (isinstance(left.dtype, CategoricalDtype) or left.spark_type == right.spark_type)\n for left, right in zip(self._internal.index_fields, other._internal.index_fields)\n ):\n return [\n left.copy(nullable=left.nullable or right.nullable)\n if left.spark_type == right.spark_type\n else InternalField(dtype=left.dtype)\n for left, right in zip(self._internal.index_fields, other._internal.index_fields)\n ]\n elif any(\n isinstance(field.dtype, CategoricalDtype)\n for field in self._internal.index_fields + other._internal.index_fields\n ):\n # TODO: non-categorical or categorical with different categories\n raise NotImplementedError(\n \"{}() between CategoricalIndex and non-categorical or \"\n \"categorical with different categories is currently not supported\".format(func_name)\n )\n else:\n return None\n\n def union(\n self, other: Union[DataFrame, Series, \"Index\", List], sort: Optional[bool] = None\n ) -> \"Index\":\n \"\"\"\n Form the union of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : bool or None, default None\n Whether to sort the resulting Index.\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n\n Index\n\n >>> idx1 = ps.Index([1, 2, 3, 4])\n >>> idx2 = ps.Index([3, 4, 5, 6])\n >>> idx1.union(idx2).sort_values()\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n\n MultiIndex\n\n >>> midx1 = ps.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")])\n >>> midx2 = ps.MultiIndex.from_tuples([(\"x\", \"c\"), (\"x\", \"d\"), (\"x\", \"e\"), (\"x\", \"f\")])\n >>> midx1.union(midx2).sort_values() # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('x', 'c'),\n ('x', 'd'),\n ('x', 'e'),\n ('x', 'f')],\n )\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n sort = True if sort is None else sort\n sort = validate_bool_kwarg(sort, \"sort\")\n other_idx: Index\n if isinstance(self, MultiIndex):\n if isinstance(other, MultiIndex):\n other_idx = other\n elif isinstance(other, list) and all(isinstance(item, tuple) for item in other):\n other_idx = MultiIndex.from_tuples(other)\n else:\n raise TypeError(\"other must be a MultiIndex or a 
list of tuples\")\n else:\n if isinstance(other, MultiIndex):\n # TODO: We can't support different type of values in a single column for now.\n raise NotImplementedError(\"Union between Index and MultiIndex is not yet supported\")\n elif isinstance(other, DataFrame):\n raise ValueError(\"Index data must be 1-dimensional\")\n else:\n other_idx = Index(other)\n\n index_fields = self._index_fields_for_union_like(other_idx, func_name=\"union\")\n\n sdf_self = self._internal.spark_frame.select(self._internal.index_spark_columns)\n sdf_other = other_idx._internal.spark_frame.select(other_idx._internal.index_spark_columns)\n sdf = sdf_self.unionAll(sdf_other).exceptAll(sdf_self.intersectAll(sdf_other))\n if sort:\n sdf = sdf.sort(*self._internal.index_spark_column_names)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=index_fields,\n )\n\n return DataFrame(internal).index\n\n def holds_integer(self) -> bool:\n \"\"\"\n Whether the type is an integer type.\n Always return False for MultiIndex.\n\n Notes\n -----\n When Index contains null values the result can be different with pandas\n since pandas-on-Spark cast integer to float when Index contains null values.\n\n >>> ps.Index([1, 2, 3, None])\n Float64Index([1.0, 2.0, 3.0, nan], dtype='float64')\n\n Examples\n --------\n >>> psidx = ps.Index([1, 2, 3, 4])\n >>> psidx.holds_integer()\n True\n\n Returns False for string type.\n\n >>> psidx = ps.Index([\"A\", \"B\", \"C\", \"D\"])\n >>> psidx.holds_integer()\n False\n\n Returns False for float type.\n\n >>> psidx = ps.Index([1.1, 2.2, 3.3, 4.4])\n >>> psidx.holds_integer()\n False\n \"\"\"\n return isinstance(self.spark.data_type, IntegralType)\n\n def intersection(self, other: Union[DataFrame, Series, \"Index\", List]) -> \"Index\":\n \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`.\n\n Parameters\n ----------\n other : Index or array-like\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n >>> idx1 = ps.Index([1, 2, 3, 4])\n >>> idx2 = ps.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2).sort_values()\n Int64Index([3, 4], dtype='int64')\n \"\"\"\n from pyspark.pandas.indexes.multi import MultiIndex\n\n other_idx: Index\n if isinstance(other, DataFrame):\n raise ValueError(\"Index data must be 1-dimensional\")\n elif isinstance(other, MultiIndex):\n # Always returns a no-named empty Index if `other` is MultiIndex.\n return self._psdf.head(0).index.rename(None)\n elif isinstance(other, Index):\n other_idx = other\n spark_frame_other = other_idx.to_frame()._to_spark()\n keep_name = self.name == other_idx.name\n elif isinstance(other, Series):\n other_idx = Index(other)\n spark_frame_other = other_idx.to_frame()._to_spark()\n keep_name = True\n elif is_list_like(other):\n other_idx = Index(other)\n if isinstance(other_idx, MultiIndex):\n return other_idx.to_frame().head(0).index\n spark_frame_other = other_idx.to_frame()._to_spark()\n keep_name = True\n else:\n raise TypeError(\"Input must be Index or array-like\")\n\n index_fields = self._index_fields_for_union_like(other_idx, func_name=\"intersection\")\n\n spark_frame_self = self.to_frame(name=SPARK_DEFAULT_INDEX_NAME)._to_spark()\n spark_frame_intersected = spark_frame_self.intersect(spark_frame_other)\n if keep_name:\n index_names = self._internal.index_names\n else:\n index_names = None\n\n 
internal = InternalFrame(\n spark_frame=spark_frame_intersected,\n index_spark_columns=[scol_for(spark_frame_intersected, SPARK_DEFAULT_INDEX_NAME)],\n index_names=index_names,\n index_fields=index_fields,\n )\n\n return DataFrame(internal).index\n\n def item(self) -> Union[Scalar, Tuple[Scalar, ...]]:\n \"\"\"\n Return the first element of the underlying data as a python scalar.\n\n Returns\n -------\n scalar\n The first element of Index.\n\n Raises\n ------\n ValueError\n If the data is not length-1.\n\n Examples\n --------\n >>> psidx = ps.Index([10])\n >>> psidx.item()\n 10\n \"\"\"\n return self.to_series().item()\n\n def insert(self, loc: int, item: Any) -> \"Index\":\n \"\"\"\n Make new Index inserting new item at location.\n\n Follows Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n\n Examples\n --------\n >>> psidx = ps.Index([1, 2, 3, 4, 5])\n >>> psidx.insert(3, 100)\n Int64Index([1, 2, 3, 100, 4, 5], dtype='int64')\n\n For negative values\n\n >>> psidx = ps.Index([1, 2, 3, 4, 5])\n >>> psidx.insert(-3, 100)\n Int64Index([1, 2, 100, 3, 4, 5], dtype='int64')\n \"\"\"\n if loc < 0:\n length = len(self)\n loc = loc + length\n loc = 0 if loc < 0 else loc\n\n index_name = self._internal.index_spark_column_names[0]\n sdf_before = self.to_frame(name=index_name)[:loc]._to_spark()\n sdf_middle = Index([item], dtype=self.dtype).to_frame(name=index_name)._to_spark()\n sdf_after = self.to_frame(name=index_name)[loc:]._to_spark()\n sdf = sdf_before.union(sdf_middle).union(sdf_after)\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=[InternalField(field.dtype) for field in self._internal.index_fields],\n )\n return DataFrame(internal).index\n\n def view(self) -> \"Index\":\n \"\"\"\n this is defined as a copy with the same identity\n \"\"\"\n return self.copy()\n\n def to_list(self) -> List:\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n .. note:: This method should only be used if the resulting list is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n Index\n\n >>> idx = ps.Index([1, 2, 3, 4, 5])\n >>> idx.to_list()\n [1, 2, 3, 4, 5]\n\n MultiIndex\n\n >>> tuples = [(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'green')]\n >>> midx = ps.MultiIndex.from_tuples(tuples)\n >>> midx.to_list()\n [(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'green')]\n \"\"\"\n log_advice(\n \"`to_list` loads all data into the driver's memory. 
\"\n \"It should only be used if the resulting list is expected to be small.\"\n )\n return self._to_internal_pandas().tolist()\n\n tolist = to_list\n\n @property\n def inferred_type(self) -> str:\n \"\"\"\n Return a string of the type inferred from the values.\n\n Examples\n --------\n >>> from datetime import datetime\n >>> ps.Index([1, 2, 3]).inferred_type\n 'integer'\n\n >>> ps.Index([1.0, 2.0, 3.0]).inferred_type\n 'floating'\n\n >>> ps.Index(['a', 'b', 'c']).inferred_type\n 'string'\n\n >>> ps.Index([True, False, True, False]).inferred_type\n 'boolean'\n \"\"\"\n return lib.infer_dtype([self.to_series().head(1).item()])\n\n def __getattr__(self, item: str) -> Any:\n if hasattr(MissingPandasLikeIndex, item):\n property_or_func = getattr(MissingPandasLikeIndex, item)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(type(self).__name__, item))\n\n def __repr__(self) -> str:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return repr(self._to_internal_pandas())\n\n pindex = self._psdf._get_or_create_repr_pandas_cache(max_display_count).index\n\n pindex_length = len(pindex)\n repr_string = repr(pindex[:max_display_count])\n\n if pindex_length > max_display_count:\n footer = \"\\nShowing only the first {}\".format(max_display_count)\n return repr_string + footer\n return repr_string\n\n def __iter__(self) -> Iterator:\n return MissingPandasLikeIndex.__iter__(self)\n\n def __and__(self, other: \"Index\") -> \"Index\":\n warnings.warn(\n \"Index.__and__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching Series.__and__. \"\n \"Use index.intersection(other) instead\",\n FutureWarning,\n )\n return self.intersection(other)\n\n def __or__(self, other: \"Index\") -> \"Index\":\n warnings.warn(\n \"Index.__or__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching Series.__or__. \"\n \"Use index.union(other) instead\",\n FutureWarning,\n )\n return self.union(other)\n\n def __xor__(self, other: \"Index\") -> \"Index\":\n warnings.warn(\n \"Index.__xor__ operating as a set operation is deprecated, \"\n \"in the future this will be a logical operation matching Series.__xor__. \"\n \"Use index.symmetric_difference(other) instead\",\n FutureWarning,\n )\n return self.symmetric_difference(other)\n\n def __rxor__(self, other: Any) -> \"Index\":\n return NotImplemented\n\n def __bool__(self) -> bool:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\".format(self.__class__.__name__)\n )\n\n\ndef _test() -> None:\n import os\n import doctest\n import sys\n from pyspark.sql import SparkSession\n import pyspark.pandas.indexes.base\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.indexes.base.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.indexes.base tests\")\n .getOrCreate()\n )\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.indexes.base,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.Index", "pandas.api.types.is_list_like", "pandas.api.types.is_hashable", "pandas.api.types.is_interval_dtype", "pandas.api.types.is_float_dtype", "pandas.core.accessor.CachedAccessor", "pandas.api.types.is_numeric_dtype", "pandas.io.formats.printing.pprint_thing", "pandas.api.types.is_object_dtype", "pandas.api.types.is_bool_dtype", "pandas.api.types.is_categorical_dtype", "pandas.api.types.is_integer_dtype" ] ]
ponxosio/Dynamic-communitie-Immigrants-MOGA
[ "990a7e00a24b574be4d135eeee5ef08af3a86c49" ]
[ "jupyter_home/sources/gas/dcd_gas_immigrants_combine_reparators.py" ]
[ "import math\n\nimport numpy as np\nfrom deap import tools\n\nimport sources.gas.auxiliary_funtions as auxf\nfrom sources.gas import RANDOM, IMMIGRANT\nfrom sources.gas.dynamic_communities_ga_standard import DynamicCommunitiesGAStandard\nfrom sources.gas.dcd_gas_immigrants_combine_reparators_config import DCDGasImmigrantsCombineReparatorsConfig\nfrom sources.gas.nsga2_skeleton import NSGAIISkeleton\nfrom sources.gloaders.loader_interface import LoaderInterface\n\n\nclass DCDGasImmigrantsCombineReparators(DynamicCommunitiesGAStandard):\n\n def __init__(self, dataset: LoaderInterface, config: DCDGasImmigrantsCombineReparatorsConfig):\n super().__init__(dataset, config)\n\n def make_dict(self):\n return self.config.make_dict()\n\n def find_communities(self):\n\n snp_size = len(self.dataset.snapshots)\n snapshot_members = [None] * snp_size\n snapshot_generations = [None] * snp_size\n snapshot_whole_population = [None] * snp_size\n snapshot_pareto = [None] * snp_size\n\n immigrants = [None] * snp_size\n repaired_list = [None] * snp_size\n\n generations_taken = [0]*snp_size\n\n # population types\n snapshot_population_types = []\n\n best_pop = None\n for i in range(snp_size):\n print(\"working on snapshot {0}...\".format(i))\n self.config.set_snapshot(i)\n\n if i is 0:\n actual_g = self.dataset.snapshots[i]\n # Initialize GA\n toolbox = self.config.ga_configs.make_toolbox(actual_g)\n\n n_gen_repaired = [0] * self.config.get_ga_config().population_size\n n_elite = 0\n n_random = self.config.get_ga_config().population_size\n pop_initial = [toolbox.individual(RANDOM) for i in range(self.config.get_ga_config().population_size)]\n\n # Evaluate the individuals\n fitnesses = toolbox.map(toolbox.evaluate, pop_initial)\n for ind, fit in zip(pop_initial, fitnesses):\n ind.fitness.values = fit\n\n ga = NSGAIISkeleton(pop_initial, toolbox, self.config.get_ga_config(), auxf.get_ref_point(actual_g))\n else:\n actual_g = self.dataset.snapshots[i]\n previous_g = self.dataset.snapshots[i-1]\n previous_sol = snapshot_members[i-1]\n\n # Initialize GA\n toolbox = self.config.make_toolbox(actual_snapshot=actual_g, previous_snapshot=previous_g,\n previous_solution=previous_sol)\n\n pop_initial, n_elite, n_random, n_gen_repaired = self._select_immigrants(toolbox, best_pop)\n ga = self._make_NSGAII(pop_initial, toolbox, auxf.get_ref_point(actual_g))\n\n # Matrix for populations\n population_size = self.config.get_ga_config().population_size\n individual_size = len(pop_initial[0])\n pop_matrix = np.zeros((2, population_size, individual_size), dtype=int)\n\n # Log initial population\n for index, ind in enumerate(pop_initial):\n pop_matrix[0, index, :] = np.array(ind, dtype=int)\n\n # evolve population\n best_pop, pareto, statistics = ga.start()\n\n # Log final population\n for index, ind in enumerate(best_pop):\n pop_matrix[1, index, :] = np.array(ind, dtype=int)\n\n # save statistics\n snapshot_generations[i] = statistics\n generations_taken[i] = len(statistics)\n\n # save whole population\n snapshot_whole_population[i] = pop_matrix\n\n # save immigrants\n immigrants[i] = [n_elite, n_random]\n repaired_list[i] = n_gen_repaired\n\n # save solution\n snapshot_members[i] = auxf.decode(self._select_solution(pareto, actual_g))\n snapshot_pareto[i] = pareto\n\n # save population types\n snapshot_population_types.append(ga.population_types)\n\n r_data = {\n \"snapshot_members\": snapshot_members,\n \"generations_taken\": generations_taken,\n \"immigrants\": immigrants,\n \"repaired_list\": repaired_list,\n 
\"population_types\": snapshot_population_types\n }\n return r_data, snapshot_generations, snapshot_whole_population, snapshot_pareto\n\n def _select_immigrants(self, tbox, past_population):\n num_elite_immigrants = int(math.ceil((1 - self.config.get_rate_random_immigrants()) *\n self.config.get_ga_config().population_size))\n\n num_random_immigrants = self.config.get_ga_config().population_size - num_elite_immigrants\n\n assert num_elite_immigrants + num_random_immigrants == self.config.get_ga_config().population_size, \\\n \"new population exceeds population size {0}\".format(num_elite_immigrants + num_random_immigrants)\n\n if num_elite_immigrants > 0:\n elite_immigrants, _ = self.config.sel_function(past_population, num_elite_immigrants)\n elite_immigrants = list(tbox.map(tbox.clone, elite_immigrants))\n\n repaired_output = [tbox.repair_1(individual=x) for x in elite_immigrants]\n repaired_output.extend([tbox.repair_2(individual=x) for x in elite_immigrants])\n\n elite_immigrants, n_gen_repaired = zip(*repaired_output)\n\n fitnesses = tbox.map(tbox.evaluate, elite_immigrants)\n for ind, fit in zip(elite_immigrants, fitnesses):\n ind.fitness.values = fit\n ind.i_type = IMMIGRANT\n\n # select the best\n elite_immigrants = tbox.dominance(elite_immigrants, num_elite_immigrants)\n elite_immigrants = list(elite_immigrants)\n else:\n elite_immigrants = []\n n_gen_repaired = 0\n\n random_immigrants = [tbox.individual(RANDOM) for _ in range(num_random_immigrants)]\n fitnesses = tbox.map(tbox.evaluate, random_immigrants)\n for ind, fit in zip(random_immigrants, fitnesses):\n ind.fitness.values = fit\n\n immigrants = elite_immigrants + random_immigrants\n return immigrants, num_elite_immigrants, num_random_immigrants, n_gen_repaired[0:num_elite_immigrants]\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
xhabara/quickai
[ "212b6b83d01e5ffdfa2f1f9740547886aec4b077" ]
[ "quickai/yolo/detect.py" ]
[ "\"\"\"\nfrom quickai import YOLOV4\n\"\"\"\n\nimport time\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n# noinspection PyUnresolvedReferences\nfrom tensorflow.compat.v1 import InteractiveSession\n# noinspection PyUnresolvedReferences\nfrom tensorflow.compat.v1 import ConfigProto\nfrom PIL import Image\nfrom tensorflow.python.saved_model import tag_constants\nfrom .yolov4 import filter_boxes\nfrom .utils import *\n\n\nclass YOLOV4:\n \"\"\"\n Method yolov4_detect is default\n \"\"\"\n\n def __init__(\n self,\n media_type,\n image=\"kite.jpg\",\n video=\"road.mp4\",\n output_format=\"XVID\",\n yolo_classes=\"coco.names\",\n framework=\"tf\",\n weights=\"./checkpoints/yolov4-416\",\n size=416,\n tiny=False,\n model=\"yolov4\",\n output=\"./detections/\",\n iou=0.45,\n score=0.25,\n dont_show=False):\n\n self.video = video\n self.image = image\n self.yolo_classes = yolo_classes\n self.framework = framework\n self.weights = weights\n self.size = size\n self.tiny = tiny\n self.model = model\n self.output = output\n self.iou = iou\n self.score = score\n self.dont_show = dont_show\n self.output_format = output_format\n\n if media_type == \"image\":\n self.yolov4_detect_image()\n elif media_type == \"video\":\n self.yolov4_detect_video()\n\n def yolov4_detect_image(self):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = load_config(self)\n input_size = self.size\n image = self.image\n\n # load model\n if self.framework == 'tflite':\n interpreter = tf.lite.Interpreter(model_path=self.weights)\n else:\n saved_model_loaded = tf.saved_model.load(\n self.weights, tags=[tag_constants.SERVING])\n\n # loop through images in list and run Yolov4 model on each\n original_image = cv2.imread(image)\n original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n\n image_data = cv2.resize(original_image, (input_size, input_size))\n image_data = image_data / 255.\n\n images_data = []\n for i in range(1):\n images_data.append(image_data)\n images_data = np.asarray(images_data).astype(np.float32)\n\n if self.framework == 'tflite':\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n print(input_details)\n print(output_details)\n interpreter.set_tensor(input_details[0]['index'], images_data)\n interpreter.invoke()\n pred = [\n interpreter.get_tensor(\n output_details[i]['index']) for i in range(\n len(output_details))]\n if self.model == 'yolov3' and self.tiny:\n boxes, pred_conf = filter_boxes(\n pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))\n else:\n boxes, pred_conf = filter_boxes(\n pred[0], pred[1], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))\n else:\n infer = saved_model_loaded.signatures['serving_default']\n batch_data = tf.constant(images_data)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=50,\n max_total_size=50,\n iou_threshold=self.iou,\n score_threshold=self.score\n )\n pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(),\n valid_detections.numpy()]\n\n # read in all class names 
from config\n class_names = read_class_names(self.yolo_classes)\n\n # by default allow all classes in .names file\n allowed_classes = list(class_names.values())\n\n # custom allowed classes (uncomment line below to allow detections for only people)\n # allowed_classes = ['person']\n\n image = draw_bbox(original_image, pred_bbox,\n allowed_classes=allowed_classes)\n\n image = Image.fromarray(image.astype(np.uint8))\n if not self.dont_show:\n image.show()\n image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)\n cv2.imwrite(self.output + 'detection' + '.png', image)\n\n def yolov4_detect_video(self):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n session = InteractiveSession(config=config)\n print(self)\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = load_config(self)\n input_size = self.size\n video_path = self.video\n\n if self.framework == 'tflite':\n interpreter = tf.lite.Interpreter(model_path=self.weights)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n print(input_details)\n print(output_details)\n else:\n saved_model_loaded = tf.saved_model.load(\n self.weights, tags=[tag_constants.SERVING])\n infer = saved_model_loaded.signatures['serving_default']\n\n # begin video capture\n try:\n vid = cv2.VideoCapture(int(video_path))\n except BaseException:\n vid = cv2.VideoCapture(video_path)\n\n out = None\n\n if self.output:\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n codec = cv2.VideoWriter_fourcc(*self.output_format)\n out = cv2.VideoWriter(self.output, codec, fps, (width, height))\n\n while True:\n return_value, frame = vid.read()\n if return_value:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n else:\n print('Video has ended or failed, try a different video format!')\n break\n\n frame_size = frame.shape[:2]\n image_data = cv2.resize(frame, (input_size, input_size))\n image_data = image_data / 255.\n image_data = image_data[np.newaxis, ...].astype(np.float32)\n start_time = time.time()\n\n if self.framework == 'tflite':\n interpreter.set_tensor(input_details[0]['index'], image_data)\n interpreter.invoke()\n pred = [\n interpreter.get_tensor(\n output_details[i]['index']) for i in range(\n len(output_details))]\n if self.model == 'yolov3' and self.tiny:\n boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,\n input_shape=tf.constant([input_size, input_size]))\n else:\n boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,\n input_shape=tf.constant([input_size, input_size]))\n else:\n batch_data = tf.constant(image_data)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=50,\n max_total_size=50,\n iou_threshold=self.iou,\n score_threshold=self.score\n )\n pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(),\n valid_detections.numpy()]\n image = draw_bbox(frame, pred_bbox)\n fps = 1.0 / (time.time() - start_time)\n print(\"FPS: %.2f\" % fps)\n result 
= np.asarray(image)\n cv2.namedWindow(\"result\", cv2.WINDOW_AUTOSIZE)\n result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n if not self.dont_show:\n cv2.imshow(\"result\", result)\n\n if self.output:\n out.write(result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()\n\n\n# YOLOV4_detect(\"kite.jpg\")\n\n\n'''\nif __name__ == '__main__':\n try:\n app.run(YOLOV4_detect())\n except SystemExit:\n pass\n'''\n" ]
[ [ "numpy.array", "tensorflow.shape", "numpy.asarray", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.InteractiveSession", "tensorflow.lite.Interpreter", "tensorflow.constant", "tensorflow.config.experimental.list_physical_devices", "tensorflow.saved_model.load" ] ]
lql341/BladeDISC
[ "94342afbfb2a94cfc412d7b0fc776de768e5448d" ]
[ "pytorch_blade/tests/mlir/test_disc_tensor.py" ]
[ "# Copyright 2021 The BladeDISC Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport unittest\n\nfrom tests.mlir.testing_utils import DiscTestCase\n\n\nclass TestDiscTensor(DiscTestCase):\n def test_tensor_cat(self):\n @torch.jit.script\n def tensor_cat(x, y, z):\n return torch.cat([x, y, z], dim=1)\n\n x = torch.randn([2, 1, 4, 4]).to(self.device)\n y = torch.randn([2, 2, 4, 4]).to(self.device)\n z = torch.randn([2, 3, 4, 4]).to(self.device)\n test_data = (x, y, z)\n self._test_cvt_to_disc(tensor_cat, test_data)\n\n z = torch.randn([2, 0, 4, 4]).to(self.device)\n test_data = (x, y, z)\n self._test_cvt_to_disc(tensor_cat, test_data)\n\n x = torch.randint(3, [2, 1, 4, 4]).to(self.device)\n y = torch.randint(3, [2, 2, 4, 4]).to(self.device)\n z = torch.randint(3, [2, 3, 4, 4]).to(self.device)\n test_data = (x, y, z)\n self._test_cvt_to_disc(tensor_cat, test_data)\n\n z = torch.randint(3, [2, 0, 4, 4]).to(self.device)\n test_data = (x, y, z)\n self._test_cvt_to_disc(tensor_cat, test_data)\n\n def test_aten_item(self):\n @torch.jit.script\n def test_item(tensor):\n x = int(tensor.item())\n return torch.tensor(x)\n\n # TODO: aten:Int only support int32_t\n self._test_cvt_to_disc(test_item, (torch.tensor((1 << 31) - 1, dtype=torch.int64),))\n self._test_cvt_to_disc(test_item, (torch.tensor(-2),))\n\n @torch.jit.script\n def test_item_2(tensor):\n # Integer division of tensors using div or / is not supported only in torch\n # 1.6, but works in 1.7 and 1.8, while the error message says it is\n # supported until 1.6. So use // instead.\n x = tensor // torch.tensor(2)\n x = int(x)\n\n return torch.tensor(x) + tensor\n\n self._test_cvt_to_disc(test_item_2, (torch.tensor(-2),))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.randint", "torch.cat", "torch.tensor", "torch.randn" ] ]
CesareMagnetti/RL
[ "cd77e3b4f8ecaa1ca0131cdf60c2381478a252e0" ]
[ "src/RL/agent.py" ]
[ "import torch, os, six, random\nimport numpy as np\nfrom abc import abstractmethod, ABCMeta\n\n\[email protected]_metaclass(ABCMeta)\nclass BaseAgent(object):\n \"\"\"\n base class for all agents\n \"\"\"\n\n def __init__(\n self,\n name,\n trainer,\n action_size,\n n_episodes,\n starting_episode=0,\n checkpoints_dir=\"./checkpoints\",\n results_dir=\"./results\",\n eps_start=1.0,\n eps_end=0.0,\n stop_decay=0.9,\n beta_start=0.4,\n beta_end=1,\n ):\n \"\"\"Initialize agent class\n Params:\n =====\n name (str): name of the experiment.\n trainer (trainer.py instance): a trainer algorithm class (i.e. Q-learning or double-Q-learning).\n action_size (int): number of possible actions, assumes a discrete action space.\n n_episodes (int): number of training episodes.\n starting_episode (optional[int]): starting episode if we load from checkpoint.\n checkpoints_dir (optional[str]): path were to save checkpoints (default=\"./checkpoints\")\n results_dir (optional[str]): path were to save results (default=\"./results\")\n eps_start (optional[float]): starting value for epsilon exploration factor (default=1.0)\n eps_end (optional[float]): final value for epsilon exploration factor (default=0.)\n beta_start (optional[float]): initial value for beta bias correction factor for prioritized experience (default=0.4 from original paper)\n beta_end (optional[float]): final value for beta bias correction factor for prioritized experience (default=1 from original paper)\n stop_decay (optional[float]): ratio of training episodes before we stop decaying eps and beta (default=0.9)\n\n NOTE: the original prioritized experience paper uses linear annealiation of the beta factor, for simplicity we use the same exponential\n decay structure for both beta and epsilon, might change this in the future.\n \"\"\"\n self.name = name\n self.trainer = trainer\n # setup checkpoints and results dirs for any logging/ input output\n self.checkpoints_dir = os.path.join(checkpoints_dir, name)\n self.results_dir = os.path.join(results_dir, name)\n if not os.path.exists(self.checkpoints_dir):\n os.makedirs(self.checkpoints_dir)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n # setup the action size\n self.action_size = action_size\n # starting epsilon value for exploration/exploitation trade off\n self.eps = eps_start\n # formulate a suitable decay factor for epsilon given the queried options (exponential decay).\n self.EPS_DECAY_FACTOR = (eps_end / eps_start) ** (\n 1 / (int(stop_decay * n_episodes) - starting_episode)\n )\n # starting beta value for bias correction in prioritized experience replay\n self.beta = beta_start\n # formulate a suitable decay factor for beta given the queried options. 
(since usually beta_end>beta_start, this will actually be an increase factor)\n # annealiate beta to 1 (or beta_end) as we go further in the episode (original P.E.R paper reccommends this)\n self.BETA_DECAY_FACTOR = (beta_end / beta_start) ** (\n 1 / (int(stop_decay * n_episodes) - starting_episode)\n )\n # place holders for episode steps and episode counts\n self.t_step, self.episode = 0, 0\n\n def random_action(self):\n \"\"\"Return a random discrete action.\"\"\"\n return random.choice(np.arange(self.action_size))\n\n def greedy_action(self, state, local_model):\n \"\"\"Returns the discrete action with max Q-value.\n Params:\n ==========\n state (torch.tensor): current state, make sure state tensor is on the same device as local_network.\n local_model (PyTorch model): takes input the state and outputs action value.\n returns:\n int (discrete action that maximizes Q-values)\n \"\"\"\n with torch.no_grad():\n Q = local_model(state)\n return torch.argmax(Q, dim=1).item()\n\n def act(self, state, local_model, eps=0.0):\n \"\"\"Generate an action given some input.\n Params:\n ==========\n state (torch.tensor): current state, make sure state tensor is on the same device as local_network.\n local_model (PyTorch model): takes input the slice and outputs action values\n eps (float): epsilon parameter governing exploration/exploitation trade-off\n \"\"\"\n if random.random() > eps:\n return self.greedy_action(state, local_model)\n else:\n return self.random_action()\n\n @abstractmethod\n def learn(self):\n \"\"\"update policy/value function/network through some routine.\"\"\"\n\n @abstractmethod\n def play_episode(self):\n \"\"\"Make the agent play a full episode.\"\"\"\n\n def train(self):\n raise NotImplementedError()\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(\n target_model.parameters(), local_model.parameters()\n ):\n target_param.data.copy_(\n tau * local_param.data + (1.0 - tau) * target_param.data\n )\n\n def hard_update(self, local_model, target_model, N):\n \"\"\"hard update model parameters.\n θ_target = θ_local every N steps.\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n N (flintoat): number of steps after which hard update takes place\n \"\"\"\n if self.t_step % N == 0:\n for target_param, local_param in zip(\n target_model.parameters(), local_model.parameters()\n ):\n target_param.data.copy_(local_param.data)\n" ]
[ [ "torch.no_grad", "numpy.arange", "torch.argmax" ] ]
cvangysel/cuNVSM
[ "3663244fce84dd83816df2ed71b94cc4ebb78a18" ]
[ "py/nvsm/base.py" ]
[ "import h5py\nimport heapq\nimport itertools\nimport numpy as np\nimport logging\nimport scipy.spatial.distance\nimport sklearn.neighbors\n\nfrom cvangysel import sklearn_utils\nfrom nvsm_pb2 import Metadata\n\n\ndef load_meta(path):\n meta = Metadata()\n\n with open('{}_meta'.format(path), 'rb') as f_meta:\n meta.ParseFromString(f_meta.read())\n\n return meta\n\n\ndef load_model(meta, path, epoch, **kwargs):\n with h5py.File('{}_{}.hdf5'.format(path, epoch), 'r') as f_model:\n return LSE(meta, f_model, **kwargs)\n\n\nclass NearestNeighbors(object):\n\n \"\"\"Wrapper around sklearn.neighbors.NearestNeighbors\n that is optimized for cosine distance.\"\"\"\n\n def __init__(self, metric='cosine', **kwargs):\n self.metric = metric\n\n nn_metric = 'euclidean' if self.metric == 'cosine' else self.metric\n\n if 'algorithm' not in kwargs:\n kwargs['algorithm'] = sklearn_utils.neighbors_algorithm(nn_metric)\n\n logging.info('Using %s algorithm for nearest neighbor retrieval '\n 'using %s metric.',\n kwargs['algorithm'], nn_metric)\n\n self.nn_impl = sklearn.neighbors.NearestNeighbors(\n metric=nn_metric, **kwargs)\n\n def fit(self, X, *args, **kwargs):\n if self.metric == 'cosine':\n X = X.copy()\n X /= scipy.linalg.norm(X, axis=1, keepdims=True)\n\n result = self.nn_impl.fit(X, *args, **kwargs)\n\n logging.info('Data was fitted using %s method.',\n self.nn_impl._fit_method)\n\n return result\n\n def kneighbors(self, X, *args, inplace=False, **kwargs):\n if self.metric == 'cosine':\n if not inplace:\n X = X.copy()\n\n X /= scipy.linalg.norm(X, axis=1, keepdims=True)\n\n result = self.nn_impl.kneighbors(X, *args, **kwargs)\n\n if kwargs.get('return_distance', True):\n dist, ind = result\n\n if self.metric == 'cosine':\n # Euclidean distance and cosine similarity/distance are related\n # as follows.\n #\n # First note that cosine distance is equal to 1.0 - cos(A, B).\n # Consequently, cosine distance operates in the domain (0, 2)\n # where higher values indicates dissimilarity. 
Cosine\n # distance can be converted into cosine similarity using\n # the formula:\n #\n # -cos_distance + 1.0.\n #\n # If A and B are normalized, then Euclidean distance and cosine\n # similarity are related as follows:\n #\n # ||A - B||^2 = 2 * cos_distance\n # cos_distance = ||A - B||^2 / 2\n #\n # Note that x^2 / 2 is a monotonically increasing function when\n # x is positive.\n #\n # Consequently, sorting according to x or according to x^2/2\n # results in the same ranking (for positive x).\n #\n # Given that Euclidean distance is always positive (due to\n # being a metric), we rely on it as a metric during nearest\n # neighbor search.\n\n dist = np.power(dist, 2.0) / 2.0\n\n return dist, ind\n else:\n return result\n\n\nclass TermBruteforcer(object):\n\n def __init__(self, model, max_ngram_cardinality=1):\n self.model = model\n\n reprs = []\n\n for k in range(1, max_ngram_cardinality + 1):\n logging.info('Computing %d-gram indices.', k)\n\n combination_idx = np.array(\n list(itertools.combinations(\n range(model.word_representations.shape[0]), k)),\n dtype=np.int32)\\\n .reshape(-1)\n\n logging.info('Obtaining %d-gram phrase representations.', k)\n\n phrase_repr = model.word_representations[combination_idx]\\\n .reshape(-1, k, model.word_representations.shape[1])\\\n .mean(axis=1)\n\n logging.info('Computing %d-gram projections.', k)\n\n phrase_projection = model.infer(phrase_repr)\n\n reprs.append(phrase_projection)\n\n logging.info('Indexing k-NN.')\n\n self.projection_neighbors = NearestNeighbors(\n metric='cosine',\n n_neighbors=20)\n\n self.projection_neighbors.fit(np.vstack(reprs))\n\n def search(self, projected_query_repr):\n if projected_query_repr is None:\n return None\n\n projected_query_repr = projected_query_repr.copy()\n\n if projected_query_repr.ndim < 2:\n projected_query_repr = projected_query_repr.reshape(1, -1)\n\n neighbor_weights, neighbor_idx = \\\n self.projection_neighbors.kneighbors(projected_query_repr)\n\n neighbor_weights = - neighbor_weights + 1\n\n nearby_ngrams = [[\n (self.model.inv_term_mapping[word_idx],\n neighbor_weights[0, idx])\n for idx, word_idx in enumerate(neighbor_idx[f_idx, :])]\n for f_idx in range(projected_query_repr.shape[0])]\n\n return nearby_ngrams\n\n\nclass NVSM(object):\n\n def __init__(self, meta, f_model,\n only_word_embeddings=False,\n only_object_embeddings=False,\n self_information=False,\n bias_coefficient=0.0,\n nonlinearity=np.tanh,\n strict=False):\n self.total_terms = meta.total_terms\n\n self.self_information = self_information\n self.nonlinearity = nonlinearity\n\n self.strict = strict\n\n if not only_object_embeddings:\n self.word_representations = \\\n f_model['word_representations-representations'][()]\n\n self.num_terms = self.word_representations.shape[0]\n self.term_repr_size = self.word_representations.shape[1]\n\n self.term_mapping = {}\n self.inv_term_mapping = {}\n\n self.inv_term_id_to_term_freq = {}\n\n for term in meta.term:\n assert term.index_term_id not in self.term_mapping\n assert term.model_term_id < self.num_terms\n self.term_mapping[term.index_term_id] = term.model_term_id\n\n assert term.model_term_id not in self.inv_term_mapping\n self.inv_term_mapping[term.model_term_id] = term.index_term_id\n\n assert term.model_term_id not in self.inv_term_id_to_term_freq\n self.inv_term_id_to_term_freq[term.model_term_id] = \\\n term.term_frequency\n\n if not only_word_embeddings:\n self.object_representations = \\\n f_model['entity_representations-representations'][()]\n\n self.num_objects = 
self.object_representations.shape[0]\n self.object_repr_size = self.object_representations.shape[1]\n\n self.object_mapping = {}\n self.inv_object_mapping = {}\n\n for o in meta.object:\n assert o.model_object_id not in self.object_mapping\n assert o.model_object_id < self.object_representations.shape[0]\n\n self.object_mapping[o.model_object_id] = o.index_object_id\n\n assert o.index_object_id not in self.inv_object_mapping\n self.inv_object_mapping[o.index_object_id] = o.model_object_id\n\n if not only_word_embeddings and not only_object_embeddings:\n self.transform_matrix = \\\n f_model['word_entity_mapping-transform'][()]\n\n if not bias_coefficient != 0.0:\n self.transform_bias = (\n bias_coefficient *\n f_model['word_entity_mapping-bias'][()].ravel())\n else:\n self.transform_bias = None\n\n assert (self.term_repr_size, self.object_repr_size) == \\\n self.transform_matrix.shape\n\n if self.transform_bias is not None:\n assert (self.object_repr_size,) == \\\n self.transform_bias.shape\n\n def __repr__(self):\n return '<NVSM with {} words ({}-dimensional) and ' \\\n '{} entities ({}-dimensional).'.format(\n self.num_terms, self.term_repr_size,\n self.num_objects, self.object_repr_size)\n\n def get_average_object_repr(self):\n if not hasattr(self, 'average_obj_repr'):\n self.average_obj_repr = np.mean(\n self.object_representations,\n axis=0)\n\n return self.average_obj_repr\n\n def get_average_word_repr(self):\n if not hasattr(self, 'average_word_repr'):\n self.average_word_repr = np.mean(\n self.word_representations,\n axis=0)\n\n return self.average_word_repr\n\n def get_word_repr(self, index_term_id):\n if index_term_id not in self.term_mapping:\n logging.warning('Term %s is out of vocabulary.',\n index_term_id)\n\n return None\n\n return self.word_representations[\n self.term_mapping[index_term_id], :]\n\n def query_representation(self, index_term_ids):\n model_terms = []\n\n for index_term_id in index_term_ids:\n if index_term_id not in self.term_mapping:\n if self.strict:\n logging.debug('Term %s is out of vocabulary; '\n 'skipping query.',\n index_term_id)\n\n else:\n logging.debug('Term %s is out of vocabulary; '\n 'skipping term.',\n index_term_id)\n\n continue\n\n model_terms.append(self.term_mapping[index_term_id])\n\n if not model_terms or (\n self.strict and len(model_terms) < len(index_term_ids)):\n return None\n\n if self.self_information:\n model_term_weights = [\n -np.log(self.inv_term_id_to_term_freq[model_term] /\n self.total_terms)\n for model_term in model_terms]\n else:\n model_term_weights = None\n\n average_term_repr = np.average(\n self.word_representations[model_terms, :],\n axis=0, weights=model_term_weights)\n\n return average_term_repr\n\n def infer(self, query_repr):\n if query_repr is None:\n return None\n\n projected_term_repr = np.dot(query_repr, self.transform_matrix)\n\n if self.transform_bias is not None:\n projected_term_repr += self.transform_bias\n\n if self.nonlinearity is not None:\n projected_term_repr = self.nonlinearity(projected_term_repr)\n\n return projected_term_repr\n\n def related_terms(self, index_term_id):\n if index_term_id not in self.term_mapping:\n logging.warning('Term %s is out of vocabulary.',\n index_term_id)\n\n return None\n\n if not hasattr(self, 'word_neighbors'):\n self.word_neighbors = NearestNeighbors(\n metric='cosine',\n n_neighbors=30)\n self.word_neighbors.fit(self.word_representations)\n\n nearest = self.word_neighbors.kneighbors(self.word_representations[\n self.term_mapping[index_term_id], :])\n\n return 
[self.inv_term_mapping[model_term_id]\n for model_term_id in nearest[1][0, :].tolist()]\n\n def term_similarity(self, first_index_term_id, second_index_term_id):\n if first_index_term_id not in self.term_mapping or \\\n second_index_term_id not in self.term_mapping:\n return None\n\n return 1.0 - scipy.spatial.distance.cosine(\n self.word_representations[\n self.term_mapping[first_index_term_id], :],\n self.word_representations[\n self.term_mapping[second_index_term_id], :])\n\n def query(self, index_terms, *args, **kwargs):\n projected_term_repr = self.infer(\n self.query_representation(index_terms))\n\n return self.query_using_projected_query(\n projected_term_repr, *args, **kwargs)\n\n def query_using_projected_query(\n self, projected_term_repr,\n similarity_fn='cosine',\n similarity_fn_include_prior=False,\n results_requested=1000,\n document_set=None):\n if projected_term_repr is None:\n return None\n\n if document_set:\n document_set = set(document_set)\n\n assert projected_term_repr.size == self.object_repr_size, \\\n projected_term_repr.shape\n\n projected_term_repr = projected_term_repr.ravel().reshape(1, -1)\n\n results_requested = min(\n results_requested,\n self.object_representations.shape[0])\n\n if (not similarity_fn_include_prior and\n results_requested is not None and\n document_set is None):\n if not hasattr(self, 'object_neighbors'):\n self.object_neighbors = NearestNeighbors(\n metric=similarity_fn,\n n_neighbors=results_requested)\n self.object_neighbors.fit(self.object_representations)\n\n self.query_similarity_fn = similarity_fn\n\n assert self.query_similarity_fn == similarity_fn\n\n nearest_dist, nearest_ind = self.object_neighbors.kneighbors(\n projected_term_repr,\n return_distance=True,\n n_neighbors=results_requested)\n\n topic_scores_and_documents = [\n (nearest_dist[0, rank],\n self.object_mapping[nearest_ind[0, rank]])\n for rank in range(nearest_ind.size)]\n else:\n if isinstance(similarity_fn, str):\n actual_similarity_fn = getattr(scipy.spatial.distance,\n similarity_fn)\n\n def similarity_fn(first, second, int_obj_id):\n return actual_similarity_fn(first, second)\n\n iterable = (\n (float(\n similarity_fn(\n projected_term_repr,\n self.object_representations[object_idx, :],\n int_obj_id=self.object_mapping[object_idx])),\n self.object_mapping[object_idx])\n for object_idx in range(self.object_representations.shape[0])\n if not document_set or\n self.object_mapping[object_idx] in document_set)\n\n if results_requested is not None:\n topic_scores_and_documents = heapq.nsmallest(\n n=results_requested, iterable=iterable)\n else:\n topic_scores_and_documents = sorted(iterable)\n\n return topic_scores_and_documents\n\n def score_documents(self, index_term_ids, int_document_ids):\n projected_term_repr = self.infer(\n self.query_representation(index_term_ids))\n\n if projected_term_repr is None:\n return\n\n assert projected_term_repr.shape == (1, self.object_repr_size)\n\n for document_id in int_document_ids:\n if document_id not in self.inv_object_mapping:\n continue\n\n similarity = 1.0 - scipy.spatial.distance.cosine(\n projected_term_repr,\n self.object_representations[\n self.inv_object_mapping[document_id], :])\n\n yield document_id, similarity\n\nLSE = NVSM # Backwards compatibility.\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.mean", "numpy.power", "numpy.average", "numpy.vstack" ] ]
fdelgados/Disaster-Response-Pipeline-Project
[ "9c2c74f74f8b6d55ed09056debc6ab5aeb2f37da" ]
[ "data/process_data.py" ]
[ "import re\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"Loads messages and categories data and creates a merged dataframe\n\n Args:\n messages_filepath (str): Path to the messages file\n categories_filepath (str): Path to the categories file\n\n Returns:\n (pd.DataFrame): A messages and categories dataframe\n \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n\n return messages.merge(categories, on='id')\n\n\ndef replace_url(text, replace=''):\n \"\"\"Replaces all urls with the replacement string\n\n Args:\n text (str): The string being searched and replaced on\n replace (str): The replacement value that replaces found urls\n\n Returns:\n str: text with the replaced urls\n \"\"\"\n url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, replace)\n\n return text\n\n\ndef clean_data(df):\n \"\"\"\n Performs the data clean process\n * Creates a column by category\n * Drops duplicated rows\n * Searches and replaces the url from the messages\n * Drops columns with one unique value\n Args:\n df (pd.DataFrame): Dataframe to be cleaned\n\n Returns:\n pd.DataFrame: clean dataframe\n \"\"\"\n categories = df['categories'].str.split(';', expand=True)\n\n row = categories.iloc[0]\n\n # use this row to extract a list of new column names for categories.\n # one way is to apply a lambda function that takes everything \n # up to the second to last character of each string with slicing\n categories.columns = row.apply(lambda x: x[:-2]).tolist()\n\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].astype(str).str[-1]\n\n # convert column from string to numeric\n numeric_value = pd.to_numeric(categories[column].apply(lambda x: '1' if x == '1' else '0'))\n categories[column] = numeric_value\n\n # drop the original categories column from `df`\n df.drop('categories', axis=1, inplace=True)\n\n df = pd.concat([df, categories], axis=1)\n\n duplicates = df.duplicated().sum()\n if duplicates > 0:\n df = df.drop_duplicates()\n\n # remove url from messages\n df['message'] = df['message'].apply(lambda text: replace_url(text))\n\n for column in categories:\n # drop columns with one unique value\n if df[column].nunique() == 1:\n df.drop(column, axis=1, inplace=True)\n\n return df\n\n\ndef save_data(df, database_filename):\n \"\"\"\n Save clean data to a database\n\n Args:\n df (pd.DataFrame): clean dataframe\n database_filename (str): Path to the database file\n \"\"\"\n engine = create_engine('sqlite:///{}'.format(database_filename))\n\n df.to_sql('messages', con=engine, index=False, if_exists='replace')\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n\n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n\n print('Cleaned data saved to database!')\n\n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as 
the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
DemoUser11221/jax
[ "a7e2f170a4c34e177406d39a24055952f0401629" ]
[ "jax/core.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom functools import partial, total_ordering\nimport gc\nimport itertools as it\nimport operator\nfrom operator import attrgetter\nimport threading\nimport types\nfrom typing import (Any, Callable, ClassVar, Dict, Generator,\n Iterator, List, NamedTuple, Optional, Sequence, Set, Tuple,\n Type, Union, cast, Iterable, Hashable)\nfrom weakref import ref\n\nimport numpy as np\n\nfrom ._src import dtypes\nfrom ._src import config as jax_config\nfrom ._src.config import FLAGS, config\nfrom .errors import (ConcretizationTypeError, TracerArrayConversionError,\n TracerIntegerConversionError, UnexpectedTracerError)\nfrom . import linear_util as lu\n\nfrom jax._src import source_info_util\nfrom ._src.util import (safe_zip, safe_map, curry, prod, partialmethod,\n tuple_insert, tuple_delete, cache, as_hashable_function,\n HashableFunction)\nfrom ._src.pprint_util import pp, vcat, PrettyPrint\n\nfrom ._src import traceback_util\ntraceback_util.register_exclusion(__file__)\n\nzip = safe_zip\nmap = safe_map\n\n\n# -------------------- jaxprs --------------------\n\nclass Jaxpr:\n constvars: List['Var']\n invars: List['Var']\n outvars: List['Atom']\n eqns: List['JaxprEqn']\n\n def __init__(self, constvars: Sequence['Var'], invars: Sequence['Var'],\n outvars: Sequence['Atom'], eqns: Sequence['JaxprEqn']):\n \"\"\"\n Args:\n constvars: list of variables introduced for constants. Array constants are\n replaced with such variables while scalar constants are kept inline.\n invars: list of input variables. 
Together, `constvars` and `invars` are\n the inputs to the Jaxpr.\n outvars: list of output variables.\n eqns: list of equations.\n \"\"\"\n self.constvars = list(constvars)\n self.invars = list(invars)\n self.outvars = list(outvars)\n self.eqns = list(eqns)\n\n def __str__(self):\n return str(pp_jaxpr(self))\n __repr__ = __str__\n\n\ndef jaxprs_in_params(params) -> Iterator[Jaxpr]:\n for val in params.values():\n vals = val if isinstance(val, tuple) else (val,)\n for v in vals:\n if isinstance(v, Jaxpr):\n yield v\n elif isinstance(v, ClosedJaxpr):\n yield v.jaxpr\n\n\ndef subjaxprs(jaxpr: Jaxpr) -> Iterator[Jaxpr]:\n \"\"\"Generator for all subjaxprs found in the params of jaxpr.eqns.\n\n Does not descend recursively into the found subjaxprs.\n \"\"\"\n for eqn in jaxpr.eqns:\n yield from jaxprs_in_params(eqn.params)\n\n\nclass ClosedJaxpr:\n jaxpr: Jaxpr\n consts: List['Any']\n\n def __init__(self, jaxpr: Jaxpr, consts: Sequence):\n assert len(consts) == len(jaxpr.constvars)\n self.jaxpr = jaxpr\n self.consts = list(consts)\n\n @property\n def in_avals(self):\n return [v.aval for v in self.jaxpr.invars]\n\n @property\n def out_avals(self):\n return [v.aval for v in self.jaxpr.outvars]\n\n @property\n def literals(self):\n return self.consts # backwards compatible alias\n\n @property\n def eqns(self):\n return self.jaxpr.eqns\n\n def map_jaxpr(self, f):\n return ClosedJaxpr(f(self.jaxpr), self.consts)\n\n def __str__(self): return str(self.jaxpr)\n def __repr__(self): return repr(self.jaxpr)\n\n@curry\ndef jaxpr_as_fun(closed_jaxpr: ClosedJaxpr, *args):\n return eval_jaxpr(closed_jaxpr.jaxpr, closed_jaxpr.consts, *args)\n\n\nclass JaxprEqn(NamedTuple):\n invars: List['Atom']\n outvars: List['Var']\n primitive: 'Primitive'\n params: Dict[str, Any]\n source_info: Optional[source_info_util.Traceback]\n\n def __repr__(self): return str(pp_eqn(self)).rstrip()\n\ndef new_jaxpr_eqn(invars, outvars, primitive, params, source_info=None):\n if primitive.call_primitive:\n assert len(outvars) == len(params[\"call_jaxpr\"].outvars)\n return JaxprEqn(invars, outvars, primitive, params, source_info)\n\n\n@total_ordering\nclass Var:\n # TODO(frostig,mattjj): We don't override __eq__ or __hash__, so comparison is\n # by object id, but pretty printing might collide.\n count: int\n suffix: str\n aval: 'AbstractValue'\n\n def __init__(self, count: int, suffix: str, aval: 'AbstractValue'):\n self.count = count\n self.suffix = suffix\n self.aval = raise_to_shaped(aval)\n\n def __lt__(self, other):\n if not isinstance(other, Var):\n return NotImplemented\n else:\n return (self.count, self.suffix) < (other.count, other.suffix)\n\n def __repr__(self):\n rem = self.count\n s = ''\n while True:\n rem, i = rem // 26, rem % 26\n s = chr(97 + i % 26) + s\n if not rem:\n break\n return s + self.suffix\n\ndef _jaxpr_vars(jaxpr):\n return it.chain(\n jaxpr.invars, jaxpr.constvars,\n (v for eqn in jaxpr.eqns for v in eqn.outvars))\n\ndef gensym(jaxprs: Optional[Sequence[Jaxpr]] = None,\n suffix: str = '') -> Callable[['AbstractValue'], Var]:\n \"\"\"Produce distinct variables, printed with the optional suffix.\n\n If `jaxprs` is provided, the variables produced will be distinct from those in\n any of the given jaxprs.\n \"\"\"\n if jaxprs is None:\n start = 0\n else:\n all_vars = it.chain.from_iterable(_jaxpr_vars(j) for j in jaxprs)\n start = 1 + max((v.count for v in all_vars), default=-1)\n counter = it.count(start=start)\n return lambda aval: Var(next(counter), suffix, aval)\n\n# In a jaxpr, `dropvar` can appear in 
place of a bound variable to indicate that\n# the assignment is dropped, i.e. that an expression's output value will never\n# be read. In that sense, `dropvar` is not a variable, but it is convenient to\n# treat it as a special case of one. Its `aval` is similarly inexact.\nclass DropVar(Var):\n count = -1\n suffix = ''\n def __init__(self): pass\n @property\n def aval(self): return abstract_unit\n def __repr__(self): return '_'\ndropvar = DropVar()\n\nclass Literal:\n __slots__ = [\"val\", \"hash\"]\n\n val: Any\n hash: Optional[int]\n\n def __init__(self, val):\n self.val = val\n try:\n self.hash = hash(val)\n except TypeError:\n if type(val) in literalable_types:\n try:\n self.hash = hash((val.item(), val.dtype))\n except (TypeError, AttributeError, ValueError):\n self.hash = None\n\n @property\n def aval(self):\n return raise_to_shaped(get_aval(self.val))\n\n def __hash__(self):\n assert False\n\n def __repr__(self):\n if hasattr(self, 'hash'):\n return '{}'.format(self.val)\n else:\n return 'Literal(val={})'.format(self.val)\n\nliteralable_types: Set[type] = set()\n\nAtom = Union[Var, Literal]\n\nclass Primitive:\n name: str\n multiple_results = False # set for multi-output primitives\n call_primitive = False # set for call primitives processed in final style\n map_primitive = False # set for map primitives processed in final style\n _dispatch_on_params = False # whether to include axis names from params in dispatch\n\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n return '{}'.format(self.name)\n\n\n def bind(self, *args, **params):\n assert (not config.jax_enable_checks or\n all(isinstance(arg, Tracer) or valid_jaxtype(arg) for arg in args)), args\n top_trace = find_top_trace(\n args, used_axis_names(self, params) if self._dispatch_on_params else None)\n tracers = map(top_trace.full_raise, args)\n out = top_trace.process_primitive(self, tracers, params)\n return map(full_lower, out) if self.multiple_results else full_lower(out)\n\n def def_impl(self, impl):\n self.impl = impl\n return impl\n\n def def_abstract_eval(self, abstract_eval):\n self.abstract_eval = abstract_eval\n return abstract_eval\n\n def def_custom_bind(self, bind):\n self.bind = bind\n return bind\n\n def impl(self, *args, **params):\n raise NotImplementedError(\"Evaluation rule for '{}' not implemented\"\n .format(self.name))\n\n def abstract_eval(self, *args, **params):\n raise NotImplementedError(\"Abstract evaluation for '{}' not implemented\"\n .format(self.name))\n\n\n# -------------------- lifting --------------------\n\n# TODO(necula): this belongs next to pe.new_eqn_recipe, but is needed in\n# core.py. Plan to move all these utilities to jaxpr.py.\ndef extract_call_jaxpr(\n primitive: Primitive,\n params: Dict[str, Any]) -> Tuple[Optional[Jaxpr], Dict[str, Any]]:\n \"\"\"Extract the call primitive subjaxpr from the params.\n\n Returns the subjaxpr and the params without the \"call_jaxpr\" value. 
If this is\n not a call primitive then returns (None, params).\n \"\"\"\n if not (primitive.call_primitive or primitive.map_primitive):\n return (None, params)\n else:\n assert \"call_jaxpr\" in params\n new_params = dict(params)\n del new_params[\"call_jaxpr\"]\n return (params[\"call_jaxpr\"], new_params)\n\n\n# TODO(mattjj): replace this approach with a primitive-keyed table of rules\ndef traverse_jaxpr_params(f, params):\n \"\"\"Applies f to each jaxpr parameter and returns a tuple of returned values.\"\"\"\n return {name: f(p)\n for name, param in params.items()\n for p in (param if isinstance(param, (tuple, list)) else [param])\n if type(p) in (Jaxpr, ClosedJaxpr)}\n\n\ndef eval_jaxpr_eqn(eqn, in_vals):\n \"\"\"Evaluates the jaxpr equation with the provided input values.\"\"\"\n call_jaxpr, params = extract_call_jaxpr(eqn.primitive, eqn.params)\n if call_jaxpr:\n subfuns = [lu.wrap_init(partial(eval_jaxpr, call_jaxpr, ()))]\n else:\n subfuns = []\n if eqn.primitive in initial_to_final_param_rules:\n bind_params = initial_to_final_param_rules[eqn.primitive](params)\n elif eqn.primitive.map_primitive:\n out_axes_thunk = HashableFunction(lambda: params['out_axes'],\n closure=params['out_axes'])\n bind_params = dict(params, out_axes_thunk=out_axes_thunk)\n del bind_params['out_axes']\n else:\n bind_params = params\n with source_info_util.user_context(eqn.source_info):\n return eqn.primitive.bind(*(subfuns + in_vals), **bind_params)\n\n\ndef eval_jaxpr(jaxpr: Jaxpr, consts, *args):\n def read(v):\n if type(v) is Literal:\n return v.val\n else:\n return env[v]\n\n def write(v, val):\n env[v] = val\n\n env: Dict[Var, Any] = {}\n write(unitvar, unit)\n map(write, jaxpr.constvars, consts)\n map(write, jaxpr.invars, args)\n for eqn in jaxpr.eqns:\n ans = eval_jaxpr_eqn(eqn, map(read, eqn.invars))\n if eqn.primitive.multiple_results:\n map(write, eqn.outvars, ans)\n else:\n write(eqn.outvars[0], ans)\n return map(read, jaxpr.outvars)\n\ninitial_to_final_param_rules: Dict[Primitive, Callable] = {}\n\n\n# -------------------- tracing --------------------\n\n\nclass Trace:\n __slots__ = ['main', 'level', 'sublevel']\n\n main: 'MainTrace'\n level: int\n sublevel: 'Sublevel'\n\n def __init__(self, main: 'MainTrace', sublevel: 'Sublevel') -> None:\n self.main = main\n self.level = main.level\n self.sublevel = sublevel\n\n def full_raise(self, val) -> 'Tracer':\n if not isinstance(val, Tracer):\n return self.pure(val)\n val._assert_live()\n level = self.level\n sublevel = self.sublevel\n if val._trace.main is self.main:\n if val._trace.sublevel == sublevel:\n return val\n elif val._trace.sublevel < sublevel:\n return self.sublift(val)\n else:\n raise escaped_tracer_error(\n val, f\"Can't lift sublevels {val._trace.sublevel} to {sublevel}\")\n elif val._trace.level < level:\n if val._trace.sublevel > sublevel:\n raise escaped_tracer_error(\n val, f\"Incompatible sublevel: {val._trace}, {(level, sublevel)}\")\n return self.lift(val)\n elif val._trace.level > level:\n raise escaped_tracer_error(\n val, f\"Can't lift level {val} to {self}\")\n else: # val._trace.level == self.level:\n raise escaped_tracer_error(\n val, f\"Different traces at same level: {val}, {self}\")\n\n def pure(self, val):\n raise NotImplementedError(\"must override\")\n\n def lift(self, tracer):\n raise NotImplementedError(\"must override\")\n\n def sublift(self, tracer):\n raise NotImplementedError(\"must override\")\n\n def process_primitive(self, primitive, tracers, params):\n raise NotImplementedError(\"must override\")\n\n def 
__repr__(self):\n return '{}(level={}/{})'.format(\n self.__class__.__name__, self.level, self.sublevel)\n\n def process_call(self, call_primitive, f, tracers, params):\n msg = (f\"{type(self)} must override process_call to handle call-like \"\n \"primitives\")\n raise NotImplementedError(msg)\n\n def process_map(self, map_primitive, f, tracers, params):\n msg = (f\"{type(self)} must override process_map to handle map-like \"\n \"primitives\")\n raise NotImplementedError(msg)\n\n def process_custom_jvp_call(self, primitive, fun, jvp, tracers):\n msg = (f\"{type(self)} must override process_custom_jvp_call \"\n \"to handle custom_jvp primitives\")\n raise NotImplementedError(msg)\n\n def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):\n msg = (f\"{type(self)} must override process_custom_vjp_call \"\n \"to handle custom_vjp primitives\")\n raise NotImplementedError(msg)\n\ndef escaped_tracer_error(tracer, detail=None):\n num_frames = FLAGS.jax_tracer_error_num_traceback_frames\n msg = ('Encountered an unexpected tracer. A function transformed by JAX '\n 'had a side effect, allowing for a reference to an intermediate value '\n f'with shape {tracer.shape} and dtype {tracer.dtype} to escape.\\n'\n 'JAX transformations require that functions explicitly return their '\n 'outputs, and disallow saving intermediate values to global state.')\n dbg = getattr(tracer._trace.main, 'debug_info', None)\n if dbg is not None:\n msg += ('\\nThe function being traced when the value leaked was '\n f'{dbg.func_src_info} traced for {dbg.traced_for}.')\n line_info = getattr(tracer, '_line_info', None)\n if line_info is not None:\n divider = '\\n' + '-'*30 + '\\n'\n msg += divider\n msg += ('The leaked intermediate value was created on line '\n f'{source_info_util.summarize(line_info)}. ')\n msg += divider\n if num_frames > 0:\n msg += (f'When the value was created, the final {num_frames} stack '\n 'frames (most recent last) excluding JAX-internal frames were:')\n msg += divider + source_info_util.summarize(\n line_info, num_frames=num_frames) + divider\n msg += ('\\nTo catch the leak earlier, try setting the environment variable '\n 'JAX_CHECK_TRACER_LEAKS or using the `jax.checking_leaks` context '\n 'manager.')\n if detail:\n msg += f'Detail: {detail}'\n return UnexpectedTracerError(msg)\n\nclass Tracer:\n __array_priority__ = 1000\n __slots__ = ['_trace', '__weakref__', '_line_info']\n\n def __array__(self, *args, **kw):\n raise TracerArrayConversionError(self)\n\n def __index__(self):\n raise TracerIntegerConversionError(self)\n\n def __init__(self, trace: Trace):\n self._trace = trace\n\n def __iter__(self):\n return iter(self.aval._iter(self))\n\n def __len__(self):\n return self.aval._len(self)\n\n @property\n def aval(self):\n raise NotImplementedError(\"must override\")\n\n def _assert_live(self) -> None:\n pass # Override for liveness checking\n\n # Python looks up special methods only on classes, not instances. 
This means\n # these methods needs to be defined explicitly rather than relying on\n # __getattr__.\n def __neg__(self): return self.aval._neg(self)\n def __pos__(self): return self.aval._pos(self)\n def __eq__(self, other): return self.aval._eq(self, other)\n def __ne__(self, other): return self.aval._ne(self, other)\n def __lt__(self, other): return self.aval._lt(self, other)\n def __le__(self, other): return self.aval._le(self, other)\n def __gt__(self, other): return self.aval._gt(self, other)\n def __ge__(self, other): return self.aval._ge(self, other)\n def __abs__(self): return self.aval._abs(self)\n def __add__(self, other): return self.aval._add(self, other)\n def __radd__(self, other): return self.aval._radd(self, other)\n def __sub__(self, other): return self.aval._sub(self, other)\n def __rsub__(self, other): return self.aval._rsub(self, other)\n def __mul__(self, other): return self.aval._mul(self, other)\n def __rmul__(self, other): return self.aval._rmul(self, other)\n def __div__(self, other): return self.aval._div(self, other)\n def __rdiv__(self, other): return self.aval._rdiv(self, other)\n def __truediv__(self, other): return self.aval._truediv(self, other)\n def __rtruediv__(self, other): return self.aval._rtruediv(self, other)\n def __floordiv__(self, other): return self.aval._floordiv(self, other)\n def __rfloordiv__(self, other): return self.aval._rfloordiv(self, other)\n def __divmod__(self, other): return self.aval._divmod(self, other)\n def __rdivmod__(self, other): return self.aval._rdivmod(self, other)\n def __mod__(self, other): return self.aval._mod(self, other)\n def __rmod__(self, other): return self.aval._rmod(self, other)\n def __pow__(self, other): return self.aval._pow(self, other)\n def __rpow__(self, other): return self.aval._rpow(self, other)\n def __matmul__(self, other): return self.aval._matmul(self, other)\n def __rmatmul__(self, other): return self.aval._rmatmul(self, other)\n def __and__(self, other): return self.aval._and(self, other)\n def __rand__(self, other): return self.aval._rand(self, other)\n def __or__(self, other): return self.aval._or(self, other)\n def __ror__(self, other): return self.aval._ror(self, other)\n def __xor__(self, other): return self.aval._xor(self, other)\n def __rxor__(self, other): return self.aval._rxor(self, other)\n def __invert__(self): return self.aval._invert(self)\n def __lshift__(self, other): return self.aval._lshift(self, other)\n def __rlshift__(self, other): return self.aval._rlshift(self, other)\n def __rshift__(self, other): return self.aval._rshift(self, other)\n def __rrshift__(self, other): return self.aval._rrshift(self, other)\n def __getitem__(self, idx): return self.aval._getitem(self, idx)\n def __nonzero__(self): return self.aval._nonzero(self)\n def __bool__(self): return self.aval._bool(self)\n def __int__(self): return self.aval._int(self)\n def __long__(self): return self.aval._long(self)\n def __hex__(self): return self.aval._hex(self)\n def __oct__(self): return self.aval._oct(self)\n def __float__(self): return self.aval._float(self)\n def __complex__(self): return self.aval._complex(self)\n\n # raises the better error message from ShapedArray\n def __setitem__(self, idx, val): return self.aval._setitem(self, idx, val)\n\n # NumPy also only looks up special methods on classes.\n def __array_module__(self, types): return self.aval._array_module(self, types)\n\n def __getattr__(self, name):\n # if the aval property raises an AttributeError, gets caught here\n assert not 
config.jax_enable_checks or name != \"aval\"\n\n try:\n attr = getattr(self.aval, name)\n except KeyError as err:\n raise AttributeError(\n \"{} has no attribute {}\".format(self.__class__.__name__, name)\n ) from err\n else:\n t = type(attr)\n if t is aval_property:\n return attr.fget(self)\n elif t is aval_method:\n return types.MethodType(attr.fun, self)\n else:\n return attr\n\n def __repr__(self):\n base = pp('Traced<{}>with<{}>'.format(self.aval, self._trace))\n contents = [(name, pp(repr(attr))) for name, attr in self._contents()]\n if contents:\n base += pp(' with ') >> vcat(pp('{} = '.format(name)) >> pp_payload\n for name, pp_payload in contents)\n return str(base)\n\n def _contents(self):\n try:\n return [(name, getattr(self, name)) for name in self.__slots__]\n except AttributeError:\n return ()\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, unused_memo):\n return self\n\n def _origin_msg(self) -> str:\n return \"\"\n\n# these can be used to set up forwarding of properties and instance methods from\n# Tracer instances to the underlying avals\naval_property = namedtuple(\"aval_property\", [\"fget\"])\naval_method = namedtuple(\"aval_method\", [\"fun\"])\n\n\nclass EvalTrace(Trace):\n # See comments in https://github.com/google/jax/pull/3370\n def pure(self, x): return x\n lift = sublift = pure\n\n def process_primitive(self, primitive, tracers, params):\n return primitive.impl(*tracers, **params)\n\n def process_call(self, primitive, f, tracers, params):\n return primitive.impl(f, *tracers, **params)\n process_map = process_call\n\n def process_custom_jvp_call(self, primitive, fun, jvp, tracers):\n del primitive, jvp # Unused.\n with new_sublevel():\n return fun.call_wrapped(*tracers)\n\n def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):\n del primitive, fwd, bwd, out_trees # Unused.\n with new_sublevel():\n return fun.call_wrapped(*tracers)\n\n\nclass MainTrace:\n level: int\n trace_type: Type[Trace]\n payload: Dict[str, Any]\n\n def __init__(self, level, trace_type, **payload) -> None:\n self.level = level\n self.trace_type = trace_type\n self.payload = payload\n\n def __repr__(self) -> str:\n return \"MainTrace({},{})\".format(self.level, self.trace_type.__name__)\n\n def __hash__(self) -> int:\n return hash((self.level, self.trace_type))\n\n def __eq__(self, other: object) -> bool:\n return (isinstance(other, MainTrace) and\n self.level == other.level and\n self.trace_type == other.trace_type and\n self.payload == other.payload)\n\n def with_cur_sublevel(self):\n return self.trace_type(self, cur_sublevel(), **self.payload)\n\nclass TraceStack:\n # See comments in https://github.com/google/jax/pull/3370\n stack: List[MainTrace]\n dynamic: MainTrace\n\n def __init__(self):\n eval_trace = MainTrace(0, EvalTrace)\n self.stack = [eval_trace]\n self.dynamic = eval_trace\n\n def next_level(self) -> int:\n return len(self.stack)\n\n def push(self, main_trace: MainTrace) -> None:\n self.stack.append(main_trace)\n\n def pop(self) -> None:\n self.stack.pop()\n\n def __repr__(self) -> str:\n stack_str = map(' {}\\n'.format, self.stack[::-1])\n return f'Trace stack\\n{stack_str}\\n{self.dynamic}'\n\n def copy(self):\n new = self.__new__(TraceStack)\n new.stack = self.stack[:]\n new.dynamic = self.dynamic\n return new\n\n\n@total_ordering\nclass Sublevel:\n\n def __init__(self, level: int):\n self.level = level\n\n def __repr__(self):\n return str(self.level)\n\n def __eq__(self, other):\n return type(other) is Sublevel and self.level == 
other.level\n\n def __lt__(self, other):\n return type(other) is Sublevel and self.level < other.level\n\n\nAxisEnvFrame = namedtuple('AxisEnvFrame', ['name', 'size', 'main_trace'])\nAxisName = Hashable\n\nno_axis_name = object()\n\nclass TraceState:\n trace_stack: TraceStack\n substack: List[Sublevel]\n axis_env: List[AxisEnvFrame]\n\n def __init__(self) -> None:\n self.trace_stack = TraceStack()\n self.substack = [Sublevel(0)]\n self.axis_env = []\n\n def copy(self):\n new = self.__new__(TraceState)\n new.trace_stack = self.trace_stack.copy()\n new.substack = self.substack[:]\n new.axis_env = self.axis_env[:]\n return new\n\n\ndef _update_thread_local_jit_state(dynamic):\n # Copies the MainTrace instance, removing any .debug_info or .jaxpr_stack\n # fields that should not be kept alive as part of a cache key.\n # TODO(mattjj): split debug_info and jaxpr_stack out of MainTrace.\n # TODO(mattjj): add a test that verifies that JIT-ted functions are not kept\n # alive by the JIT cache, particularly for nested JIT-ted functions.\n copy = MainTrace(dynamic.level, dynamic.trace_type, **dynamic.payload)\n jax_config.update_thread_local_jit_state(dynamic_trace_state=copy)\n\n\n# The global state of the tracer is accessed by a thread-local object.\n# This allows concurrent tracing in separate threads; passing traced objects\n# between threads is forbidden.\nclass ThreadLocalState(threading.local):\n def __init__(self):\n self.trace_state = TraceState()\n _update_thread_local_jit_state(self.trace_state.trace_stack.dynamic)\nthread_local_state = ThreadLocalState()\n\ndef trace_state_clean() -> bool:\n trace_state = thread_local_state.trace_state\n return (trace_state.substack == [Sublevel(0)] and\n trace_state.axis_env == [] and\n trace_state.trace_stack.stack == [MainTrace(0, EvalTrace)] and\n trace_state.trace_stack.dynamic == MainTrace(0, EvalTrace))\n\ndef reset_trace_state() -> bool:\n \"Reset the global trace state and return True if it was already clean.\"\n if not trace_state_clean():\n thread_local_state.trace_state.__init__() # type: ignore\n return False\n else:\n return True\n\ndef cur_sublevel() -> Sublevel:\n return thread_local_state.trace_state.substack[-1]\n\ndef maybe_find_leaked_tracers(x: Optional[Union[MainTrace, Sublevel]]):\n \"\"\"Find the leaked tracers holding a reference to the MainTrace or SubLevel.\n\n It's possible there's none! eg. there's some cases where JAX itself holds a\n reference to `x` inside of a lambda closure, and no tracers were leaked\n by the user. In this case an empty list is returned.\n \"\"\"\n traces = list(filter(lambda x: isinstance(x, Trace), gc.get_referrers(x)))\n tracers = list(filter(lambda x: isinstance(x, Tracer), gc.get_referrers(*traces)))\n return tracers\n\n@contextmanager\ndef new_main(trace_type: Type[Trace],\n dynamic: bool = False,\n **payload) -> Generator[MainTrace, None, None]:\n # See comments in https://github.com/google/jax/pull/3370\n stack = thread_local_state.trace_state.trace_stack\n level = stack.next_level()\n main = MainTrace(level, trace_type, **payload)\n stack.push(main)\n if dynamic:\n prev_dynamic, stack.dynamic = stack.dynamic, main\n _update_thread_local_jit_state(stack.dynamic)\n\n try:\n yield main\n finally:\n stack.pop()\n if dynamic:\n stack.dynamic = prev_dynamic\n _update_thread_local_jit_state(stack.dynamic)\n\n if config.jax_check_tracer_leaks:\n t = ref(main)\n del main\n if t() is not None:\n leaked_tracers = maybe_find_leaked_tracers(t())\n if leaked_tracers:\n raise Exception(f'Leaked level {t()}. 
Leaked tracer(s): {leaked_tracers}.')\n\n@contextmanager\ndef new_base_main(trace_type: Type[Trace]) -> Generator[MainTrace, None, None]:\n # See comments in https://github.com/google/jax/pull/3370\n stack = thread_local_state.trace_state.trace_stack\n main = MainTrace(0, trace_type)\n prev_dynamic, stack.dynamic = stack.dynamic, main\n prev_base, stack.stack[0] = stack.stack[0], main\n _update_thread_local_jit_state(stack.dynamic)\n try:\n yield main\n finally:\n stack.dynamic = prev_dynamic\n stack.stack[0] = prev_base\n _update_thread_local_jit_state(stack.dynamic)\n\n if config.jax_check_tracer_leaks:\n t = ref(main)\n del main\n if t() is not None:\n leaked_tracers = maybe_find_leaked_tracers(t())\n if leaked_tracers:\n raise Exception(f'Leaked level {t()}. Leaked tracer(s): {leaked_tracers}.')\n\n@contextmanager\ndef eval_context():\n with new_base_main(EvalTrace):\n yield\n\n@contextmanager\ndef new_sublevel() -> Generator[None, None, None]:\n sublevel = Sublevel(len(thread_local_state.trace_state.substack))\n thread_local_state.trace_state.substack.append(sublevel)\n try:\n yield\n finally:\n thread_local_state.trace_state.substack.pop()\n\n if config.jax_check_tracer_leaks:\n t = ref(sublevel)\n del sublevel\n if t() is not None:\n leaked_tracers = maybe_find_leaked_tracers(t())\n if leaked_tracers:\n raise Exception(f'Leaked sublevel {t()}. Leaked tracer(s): {leaked_tracers}.')\n\ndef full_lower(val):\n if isinstance(val, Tracer):\n return val.full_lower()\n else:\n return val\n\ndef find_top_trace(xs, axis_names=None) -> Trace:\n top_main: Optional[MainTrace] = None\n if axis_names:\n top_main = max((axis_frame(a).main_trace for a in axis_names),\n default=None, key=lambda t: getattr(t, 'level', -1))\n top_tracer = max((x for x in xs if isinstance(x, Tracer)),\n default=None, key=attrgetter('_trace.level'))\n if top_tracer is not None:\n top_tracer._assert_live()\n if top_tracer._trace.main.level > getattr(top_main, 'level', -1):\n top_main = top_tracer._trace.main\n dynamic = thread_local_state.trace_state.trace_stack.dynamic\n top_main = (dynamic if top_main is None or dynamic.level > top_main.level\n else top_main)\n return top_main and top_main.with_cur_sublevel() # type: ignore\n\n\n# -------------------- abstract values --------------------\n\n\nclass AbstractValue:\n __slots__: List[str] = []\n _num_buffers: int = 1 # number of buffers used to represent the value.\n\n def at_least_vspace(self):\n raise NotImplementedError(\"must override\")\n\n def __repr__(self):\n try:\n kv_pairs = ('{}={}'.format(k, v) for k, v in self.__dict__.items())\n return '{}({})'.format(self.__class__.__name__, ','.join(kv_pairs))\n except AttributeError:\n return self.__class__.__name__\n\n def strip_weak_type(self) -> 'AbstractValue':\n return self\n\n def strip_named_shape(self) -> 'AbstractValue':\n return self\n\n def join(self, other):\n raise NotImplementedError(\"must override\")\n\n def update(self, **kwargs):\n raise NotImplementedError(\"must override\")\n\n def str_short(self):\n raise NotImplementedError(\"must override\")\n\nclass Bot(AbstractValue): pass\n\nbot = Bot()\n\nclass AbstractUnit(AbstractValue):\n # TODO(jakevdp): make it possible to set zero buffers\n # _num_buffers = 0\n def at_least_vspace(self): return self\n def join(self, other):\n if config.jax_enable_checks:\n assert other is abstract_unit, other\n return self\n def _eq(self, self_traced, other): return get_aval(other) is self\n def str_short(self): return '*'\n\nabstract_unit = AbstractUnit()\n\ndef 
lattice_join(x: Optional[AbstractValue],\n y: Optional[AbstractValue]) -> AbstractValue:\n if x is None:\n return cast(AbstractValue, y)\n elif y is None:\n return cast(AbstractValue, x)\n elif isinstance(x, type(y)):\n return y.join(x)\n elif isinstance(y, type(x)):\n return x.join(y)\n else:\n raise TypeError(x, y)\n\n# For use in typing annotations to denote either a Tracer or a `valid_jaxtype`.\nValue = Any\n\ndef valid_jaxtype(x):\n try:\n concrete_aval(x)\n except TypeError:\n return False\n else:\n return True\n\ndef check_valid_jaxtype(x):\n if not valid_jaxtype(x):\n raise TypeError(\n f\"Value {repr(x)} of type {type(x)} is not a valid JAX type\")\n\n\ndef concrete_aval(x):\n for typ in type(x).mro():\n handler = pytype_aval_mappings.get(typ)\n if handler: return handler(x)\n if hasattr(x, '__jax_array__'):\n return concrete_aval(x.__jax_array__())\n raise TypeError(f\"Value {repr(x)} with type {type(x)} is not a valid JAX \"\n \"type\")\n\n\ndef get_aval(x):\n if isinstance(x, Tracer):\n return x.aval\n else:\n return concrete_aval(x)\n\n\npytype_aval_mappings: Dict[type, Callable[[Any], AbstractValue]] = {}\n\n\nclass Unit:\n def __repr__(self): return '*'\nunit = Unit()\nliteralable_types.add(Unit)\n\nclass UnitVar(Var):\n count = -1\n suffix = ''\n def __init__(self): pass\n @property\n def aval(self): return abstract_unit\n def __repr__(self): return '*'\nunitvar = UnitVar()\n\npytype_aval_mappings[Unit] = lambda _: abstract_unit\n\ndef concretization_function_error(fun, suggest_astype=False):\n fname = getattr(fun, \"__name__\", fun)\n fname_context = f\"The problem arose with the `{fname}` function. \"\n if suggest_astype:\n fname_context += (\"If trying to convert the data type of a value, \"\n f\"try using `x.astype({fun.__name__})` \"\n f\"or `jnp.array(x, {fun.__name__})` instead.\")\n def error(self, arg):\n raise ConcretizationTypeError(arg, fname_context)\n return error\n\ndef concrete_or_error(force: Any, val: Any, context=\"\"):\n \"\"\"Like force(val), but gives the context in the error message.\"\"\"\n if force is None:\n force = lambda x: x\n if isinstance(val, Tracer):\n if isinstance(val.aval, ConcreteArray):\n return force(val.aval.val)\n else:\n raise ConcretizationTypeError(val, context)\n else:\n return force(val)\n\nconvert_element_type_p = Primitive('convert_element_type')\n\nclass UnshapedArray(AbstractValue):\n __slots__ = ['dtype', 'weak_type']\n array_abstraction_level = 2\n\n def __init__(self, dtype, weak_type=False):\n self.dtype = np.dtype(dtypes.canonicalize_dtype(dtype))\n self.weak_type = weak_type\n\n def update(self, dtype=None, weak_type=None):\n if dtype is None:\n dtype = self.dtype\n if weak_type is None:\n weak_type = self.weak_type\n return UnshapedArray(dtype, weak_type)\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype and\n self.weak_type == other.weak_type)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. 
`np.zeros(3).dtype is np.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.dtype, self.weak_type))\n\n def __repr__(self):\n return '{}({}{})'.format(self.__class__.__name__, self.str_short(),\n \", weak_type=True\" if self.weak_type else \"\")\n\n _bool = _nonzero = concretization_function_error(bool)\n _float = concretization_function_error(float, True)\n _int = concretization_function_error(int, True)\n _complex = concretization_function_error(complex, True)\n _hex = concretization_function_error(hex)\n _oct = concretization_function_error(oct)\n\n def at_least_vspace(self) -> AbstractValue:\n return UnshapedArray(primal_dtype_to_tangent_dtype(self.dtype),\n self.weak_type)\n\n def join(self, other):\n if self.dtype == other.dtype:\n if self.weak_type == other.weak_type:\n return self\n else:\n return UnshapedArray(self.dtype, weak_type=False)\n else:\n raise TypeError(self, other)\n\n def str_short(self) -> str:\n return self.dtype.name\n\n def strip_weak_type(self):\n \"\"\"Returns a copy of the aval with weak_type=False.\"\"\"\n return self.update(weak_type=False)\n\n @property\n def shape(self):\n msg = (\"UnshapedArray has no shape. Please open an issue at \"\n \"https://github.com/google/jax/issues because it's unexpected for \"\n \"UnshapedArray instances to ever be produced.\")\n raise TypeError(msg)\n\nclass ShapedArray(UnshapedArray):\n __slots__ = ['shape', 'named_shape']\n array_abstraction_level = 1\n\n def __init__(self, shape, dtype, weak_type=False, named_shape={}):\n super().__init__(dtype, weak_type=weak_type)\n self.shape = canonicalize_shape(shape)\n self.named_shape = dict(named_shape)\n\n def update(self, shape=None, dtype=None, weak_type=None, named_shape=None):\n if shape is None:\n shape = self.shape\n if dtype is None:\n dtype = self.dtype\n if weak_type is None:\n weak_type = self.weak_type\n if named_shape is None:\n named_shape = self.named_shape\n return ShapedArray(shape, dtype, weak_type, named_shape)\n\n ndim = property(lambda self: len(self.shape))\n size = property(lambda self: prod(self.shape))\n\n broadcast: ClassVar[Optional[aval_method]] = None\n transpose: ClassVar[Optional[aval_method]] = None\n reshape: ClassVar[Optional[aval_method]] = None\n _iter: ClassVar[Optional[staticmethod]] = None\n\n def __eq__(self, other):\n return (type(self) is type(other)\n and self.dtype == other.dtype and self.shape == other.shape\n and self.weak_type == other.weak_type\n and self.named_shape == other.named_shape)\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. 
`np.zeros(3).dtype is np.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.shape, self.dtype, self.weak_type,\n tuple(self.named_shape.items())))\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, primal_dtype_to_tangent_dtype(self.dtype),\n self.weak_type, self.named_shape)\n\n def join(self, other):\n if symbolic_equal_shape(self.shape, other.shape) and self.dtype == other.dtype:\n weak_type = self.weak_type and other.weak_type\n named_shape = join_named_shapes(self.named_shape, other.named_shape)\n return self.update(weak_type=weak_type, named_shape=named_shape)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(self, other)\n\n def str_short(self):\n shapestr = ','.join(map(str, self.shape))\n if self.named_shape:\n named_shapestr = ','.join(f'{k}:{v}' for k, v in self.named_shape.items())\n return f'{self.dtype.name}[{shapestr};{named_shapestr}]'\n else:\n return f'{self.dtype.name}[{shapestr}]'\n\n def strip_named_shape(self):\n return self.update(named_shape={})\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError as err:\n raise TypeError(\"len() of unsized object\") from err # same as numpy error\n\n def _len(self, ignored_tracer):\n return len(self)\n\n\ndef _forward_to_value(self, fun, ignored_tracer, *args):\n return fun(self.val, *args)\n\nclass ConcreteArray(ShapedArray):\n __slots__ = ['val']\n array_abstraction_level = 0\n\n def __init__(self, val, weak_type=False):\n super().__init__(np.shape(val), np.result_type(val),\n weak_type=weak_type)\n # Note: canonicalized self.dtype doesn't necessarily match self.val\n self.val = val\n assert self.dtype != np.dtype('O'), val\n\n def update(self, val=None, weak_type=None):\n if val is None:\n val = self.val\n if weak_type is None:\n weak_type = self.weak_type\n return ConcreteArray(val, weak_type)\n\n def __eq__(self, other):\n if (type(self) is type(other) and self.dtype == other.dtype\n and self.shape == other.shape and self.weak_type == other.weak_type):\n with eval_context(): # in case self.val is a DeviceArray\n return (self.val == other.val).all()\n else:\n return False\n\n def __hash__(self):\n return id(self.val)\n\n def join(self, other) -> AbstractValue:\n if self == other:\n return self\n elif self.shape == other.shape and self.dtype == other.dtype:\n weak_type = self.weak_type and other.weak_type\n named_shape = join_named_shapes(self.named_shape, other.named_shape)\n return ShapedArray(\n self.shape, self.dtype, weak_type=weak_type, named_shape=named_shape)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype,\n weak_type=self.weak_type and other.weak_type)\n else:\n raise TypeError(self, other)\n\n def str_short(self) -> str:\n return f'{self.val}, dtype={self.dtype.name}'\n\n _bool = _nonzero = partialmethod(_forward_to_value, bool)\n _int = partialmethod(_forward_to_value, int)\n _hex = partialmethod(_forward_to_value, hex)\n _oct = partialmethod(_forward_to_value, oct)\n\n _float = concretization_function_error(float, True)\n _complex = concretization_function_error(complex, True)\n\ndef primal_dtype_to_tangent_dtype(primal_dtype):\n if not dtypes.issubdtype(primal_dtype, np.inexact):\n return dtypes.float0\n else:\n return primal_dtype\n\nclass AbstractToken(AbstractValue):\n def join(self, other):\n if isinstance(other, AbstractToken):\n return self\n else:\n assert False, f\"Cannot join {self} with {other}\"\n def str_short(self): return 'Tok'\n def 
at_least_vspace(self): return self\n\nabstract_token: AbstractToken = AbstractToken()\n\n\ndef raise_to_shaped(aval: AbstractValue, weak_type=None):\n if weak_type is None:\n weak_type = getattr(aval, 'weak_type', False)\n for typ in type(aval).mro():\n handler = raise_to_shaped_mappings.get(typ)\n if handler: return handler(aval, weak_type)\n raise TypeError(type(aval))\n\nraise_to_shaped_mappings : Dict[type, Callable] = {\n AbstractUnit: lambda aval, _: aval,\n AbstractToken: lambda aval, _: aval,\n Bot: lambda aval, _: aval,\n UnshapedArray: lambda aval, _: aval,\n ShapedArray: lambda aval, weak_type: ShapedArray(\n aval.shape, aval.dtype, weak_type, aval.named_shape)\n}\n\n### Operations on shapes and dimension sizes.\n\n# Shapes are tuples of dimension sizes, which are normally integers. We allow\n# modules to extend the set of dimension sizes to contain other types, e.g.,\n# symbolic dimensions in jax2tf.shape_poly.DimVar and masking.Poly.\nDimSize = Union[int, Any] # extensible\nShape = Sequence[DimSize]\n\n\nclass InconclusiveDimensionOperation(Exception):\n \"\"\"Raised when we cannot conclusively compute with symbolic dimensions.\"\"\"\n pass\n\nclass DimensionHandler:\n \"\"\"Operations on dimension sizes.\n\n Dimension sizes are normally integer constants, but can also be symbolic,\n e.g., masking.Poly or jax2tf.shape_poly.DimVar.\n\n The base class works for integers only. Subclasses are invoked when at\n least one of the operands has a type registered in _SPECIAL_DIMENSION_HANDLERS.\n In that case, all operands are guaranteed to be either the special dimension\n type, or Python integer scalars.\n\n Subclasses should raise InconclusiveDimensionOperation if the result cannot\n be computed in some contexts.\n \"\"\"\n def is_constant(self, d: DimSize) -> bool:\n \"\"\"The dimension is a constant.\"\"\"\n return True\n\n def symbolic_equal(self, d1: DimSize, d2: DimSize) -> bool:\n \"\"\"True iff the dimension sizes are equal in all contexts; False otherwise.\n Unlike `d1 == d2` this never raises InconclusiveDimensionOperation.\n \"\"\"\n return d1 == d2\n\n def greater_equal(self, d1: DimSize, d2: DimSize) -> bool:\n \"\"\"Computes `d1 >= d2`.\n Raise InconclusiveDimensionOperation if the result is different in\n different contexts.\n \"\"\"\n return d1 >= d2\n\n def sum(self, *ds: DimSize) -> DimSize:\n \"\"\"Sum of dimensions.\n Raises InconclusiveDimensionOperation if the result cannot be represented\n by the same DimSize in all contexts.\n \"\"\"\n return sum(ds)\n\n def diff(self, d1: DimSize, d2: DimSize) -> DimSize:\n \"\"\"Difference of dimensions.\n Raises InconclusiveDimensionOperation if the result cannot be represented\n by the same DimSize in all contexts.\n \"\"\"\n return d1 - d2\n\n def divide_shape_sizes(self, s1: Shape, s2: Shape) -> DimSize:\n \"\"\"Computes integer \"i\" such that i * size(s2) == size(s1).\n\n Raise InconclusiveDimensionOperation if there is no such integer for all\n contexts,\n \"\"\"\n sz1 = int(np.prod(s1))\n sz2 = int(np.prod(s2))\n if sz1 == 0 and sz2 == 0:\n return 1\n if sz1 % sz2:\n raise InconclusiveDimensionOperation(f\"Cannot divide evenly the sizes of shapes {tuple(s1)} and {tuple(s2)}\")\n return sz1 // sz2\n\n def stride(self, d: DimSize, window_size: DimSize, window_stride: DimSize) -> DimSize:\n \"\"\"(d - window_size) // window_stride + 1\"\"\"\n return (d - window_size) // window_stride + 1\n\n def dilate(self, d: DimSize, dilation: int) -> DimSize:\n \"\"\"Implements `0 if d == 0 else 1 + dilation * (d - 1))`\"\"\"\n 
return 0 if d == 0 else 1 + dilation * (d - 1)\n\n def as_value(self, d: DimSize):\n \"\"\"Turns a dimension size into a JAX value that we can compute with.\"\"\"\n return d\n\n_dimension_handler_int = DimensionHandler()\n_SPECIAL_DIMENSION_HANDLERS: Dict[type, DimensionHandler] = {}\n\ndef _dim_handler_and_canonical(*dlist: DimSize) -> Tuple[DimensionHandler, Tuple[DimSize, ...]]:\n \"\"\"Finds the handler for the given dimensions; also returns the canonical dimensions.\n\n A dimension is canonical if it is a Python integer scalar, or has a type\n registered in _SPECIAL_DIMENSION_HANDLERS.\n \"\"\"\n special_handlers = set()\n canonical = []\n for d in dlist:\n handler = _SPECIAL_DIMENSION_HANDLERS.get(type(d))\n if handler:\n special_handlers.add(handler)\n canonical.append(d)\n else:\n try:\n canonical.append(operator.index(d))\n except TypeError:\n raise _invalid_shape_error(dlist)\n\n if len(special_handlers) > 1:\n msg = (f\"Dimension size operation involves multiple special dimension types {dlist}\")\n raise ValueError(msg)\n return next(iter(special_handlers), _dimension_handler_int), tuple(canonical)\n\ndef is_constant_dim(d: DimSize) -> bool:\n handler, ds = _dim_handler_and_canonical(d)\n return handler.is_constant(*ds)\n\ndef symbolic_equal_dim(d1: DimSize, d2: DimSize) -> bool:\n handler, ds = _dim_handler_and_canonical(d1, d2)\n return handler.symbolic_equal(*ds)\n\ndef symbolic_equal_one_of_dim(d1: DimSize, dlist: Sequence[DimSize]) -> bool:\n handler, ds = _dim_handler_and_canonical(d1, *dlist)\n return any([handler.symbolic_equal(ds[0], d) for d in ds[1:]])\n\ndef symbolic_equal_shape(s1: Shape, s2: Shape) -> bool:\n return (len(s1) == len(s2) and\n all(map(symbolic_equal_dim, s1, s2)))\n\ndef greater_equal_dim(d1: DimSize, d2: DimSize) -> bool:\n handler, ds = _dim_handler_and_canonical(d1, d2)\n return handler.greater_equal(*ds)\n\ndef greater_equal_shape(s1: Shape, s2: Shape) -> bool:\n return all(map(greater_equal_dim, s1, s2))\n\ndef sum_dim(*ds: DimSize) -> DimSize:\n handler, ds = _dim_handler_and_canonical(*ds)\n return handler.sum(*ds)\n\ndef sum_shapes(*ss: Shape) -> Shape:\n return tuple(map(sum_dim, *ss))\n\ndef diff_dim(d1: DimSize, d2: DimSize) -> DimSize:\n handler, ds = _dim_handler_and_canonical(d1, d2)\n return handler.diff(*ds)\n\ndef diff_shape(s1: Shape, s2: Shape) -> Shape:\n return tuple(map(diff_dim, s1, s2))\n\ndef divide_shape_sizes(s1: Shape, s2: Shape) -> DimSize:\n \"\"\"Returns an integer \"i\" s.t., i * size(s2) == size(s1).\n Raises if there is no such integer.\"\"\"\n s1 = s1 or (1,)\n s2 = s2 or (1,)\n handler, ds = _dim_handler_and_canonical(*s1, *s2)\n return handler.divide_shape_sizes(ds[:len(s1)], ds[len(s1):])\n\ndef same_shape_sizes(s1: Shape, s2: Shape) -> bool:\n return 1 == divide_shape_sizes(s1, s2)\n\ndef is_empty_shape(s: Shape) -> bool:\n return any(symbolic_equal_dim(d, 0) for d in s)\n\ndef dilate_dim(d: DimSize, dilation: DimSize) -> DimSize:\n \"\"\"Implements `0 if d == 0 else 1 + dilation * (d - 1))`\"\"\"\n handler, ds = _dim_handler_and_canonical(d, dilation)\n return handler.dilate(*ds)\n\ndef dilate_shape(s: Shape, dilations: Sequence[int]) -> Shape:\n return tuple(map(dilate_dim, s, dilations))\n\ndef stride_dim(d: DimSize, window_size: DimSize, window_stride: DimSize) -> DimSize:\n handler, ds = _dim_handler_and_canonical(d, window_size, window_stride)\n return handler.stride(*ds)\n\ndef stride_shape(s: Shape, window_size: Shape, window_stride: Shape) -> Shape:\n \"\"\"(s - window_size) // window_stride + 1\"\"\"\n 
return tuple(map(stride_dim, s, window_size, window_stride))\n\ndef dimension_as_value(d: DimSize):\n \"\"\"Turns a dimension size into a JAX value that we can compute with.\n This is the identity function for constant dimensions.\"\"\"\n handler, ds = _dim_handler_and_canonical(d)\n return handler.as_value(*ds)\n\ndef _canonicalize_dimension(dim: DimSize) -> DimSize:\n if type(dim) in _SPECIAL_DIMENSION_HANDLERS:\n return dim\n else:\n return operator.index(dim)\n\ndef canonicalize_shape(shape: Shape, context: str=\"\") -> Shape:\n \"\"\"Canonicalizes and checks for errors in a user-provided shape value.\n\n Args:\n shape: a Python value that represents a shape.\n\n Returns:\n A tuple of canonical dimension values.\n \"\"\"\n try:\n return tuple(map(_canonicalize_dimension, shape))\n except TypeError:\n pass\n raise _invalid_shape_error(shape, context)\n\ndef canonicalize_dim(d: DimSize, context: str=\"\") -> DimSize:\n \"\"\"Canonicalizes and checks for errors in a user-provided shape dimension value.\n\n Args:\n f: a Python value that represents a dimension.\n\n Returns:\n A canonical dimension value.\n \"\"\"\n return canonicalize_shape((d,), context)[0]\n\ndef _invalid_shape_error(shape: Shape, context: str=\"\"):\n msg = (\"Shapes must be 1D sequences of concrete values of integer type, \"\n f\"got {shape}.\")\n if context:\n msg += f\" {context}.\"\n if any(isinstance(x, Tracer) and isinstance(get_aval(x), ShapedArray)\n and not isinstance(get_aval(x), ConcreteArray) for x in shape):\n msg += (\"\\nIf using `jit`, try using `static_argnums` or applying `jit` to \"\n \"smaller subfunctions.\")\n return TypeError(msg)\n\n# ------------------- Named shapes -------------------\n\n\nclass NamedShape:\n def __init__(self, *args, **kwargs):\n self.__positional = canonicalize_shape(args)\n # TODO: Assert that kwargs match axis env?\n self.__named = dict(kwargs)\n\n @property\n def rank(self):\n return len(self.__positional) + len(self.__named)\n\n @property\n def positional_rank(self):\n return len(self.__positional)\n\n @property\n def named_rank(self):\n return len(self.__named)\n\n @property\n def positional(self):\n return self.__positional\n\n @property\n def names(self):\n return self.__named.keys()\n\n @property\n def named_sizes(self):\n return self.__named.values()\n\n @property\n def named_items(self):\n return self.__named.items()\n\n def __getitem__(self, idx):\n try:\n idx = operator.index(idx)\n return self.__positional[idx]\n except TypeError:\n pass\n return self.__named[idx]\n\n @property\n def total(self):\n total = 1\n for s in self.__positional: total *= s\n for s in self.__named.values(): total *= s\n return total\n\n def __str__(self):\n return (f\"({', '.join(map(str, self.__positional))}{', ' if self.__named else ''}\"\n f\"{', '.join(f'{k}={v}' for k, v in self.__named.items())})\")\n\n def __eq__(self, other):\n if isinstance(other, NamedShape):\n return (self.__positional, self.__named) == (other.__positional, other.__named)\n if isinstance(other, tuple):\n return not self.__named and self.__positional == other\n raise TypeError(f\"NamedShape doesn't support comparisons with {type(other)}\")\n\n def __hash__(self):\n return hash((self.__positional, tuple(self.__named.items())))\n\ndef join_named_shapes(*named_shapes):\n result = {}\n for named_shape in named_shapes:\n for name, size in named_shape.items():\n if result.setdefault(name, size) != size:\n raise TypeError(\n f\"Axis name {name} used with inconsistent sizes: {result[name]} != {size}\")\n return 
result\n\n# TODO: Make canonicalize_shape return named shapes?\ndef as_named_shape(shape) -> NamedShape:\n if isinstance(shape, NamedShape):\n return shape\n return NamedShape(*shape)\n\n\n# ------------------- Call -------------------\n\ndef apply_todos(todos, outs):\n todos_list = list(todos)\n while todos_list:\n outs = map(full_lower, todos_list.pop()(outs))\n return outs\n\nclass _IgnoreElemList(list):\n \"\"\"Compares equal to all other _ignore_elem_lists.\"\"\"\n def __hash__(self): return 0\n def __eq__(self, other):\n return type(other) is _IgnoreElemList\n\[email protected]_with_aux\ndef process_env_traces(primitive: Union['CallPrimitive', 'MapPrimitive'],\n level: int, params_tuple: tuple, out_axes_transforms, *args):\n outs = yield args, {}\n params = dict(params_tuple)\n todo = []\n assert not out_axes_transforms\n while True:\n tracers = [x for x in outs if isinstance(x, Tracer)\n and (level is None or x._trace.level > level)]\n if tracers:\n ans = max(tracers, key=lambda x: x._trace.level)\n else:\n break\n trace = ans._trace.main.with_cur_sublevel()\n outs = map(trace.full_raise, outs)\n outs, cur_todo = primitive.post_process(trace, outs, params)\n if isinstance(primitive, MapPrimitive):\n cur_todo, out_axes_transform = cur_todo\n out_axes_transforms.append(out_axes_transform)\n todo.append(cur_todo)\n yield outs, tuple(todo) # Ensure the aux output is immutable\n\ndef call_bind(primitive: Union['CallPrimitive', 'MapPrimitive'],\n fun, *args, **params):\n out_axes_transforms = _IgnoreElemList()\n if primitive.map_primitive:\n out_axes_thunk = params['out_axes_thunk']\n # The new thunk depends deterministically on the old thunk and the wrapped function.\n # Any caching already has to include the wrapped function as part of the key, so we\n # only use the previous thunk for equality checks.\n @as_hashable_function(closure=out_axes_thunk)\n def new_out_axes_thunk():\n out_axes = out_axes_thunk()\n for t in out_axes_transforms:\n out_axes = t(out_axes)\n return out_axes\n params = dict(params, out_axes_thunk=new_out_axes_thunk)\n params_tuple = tuple(params.items())\n top_trace = find_top_trace(args)\n fun, env_trace_todo = process_env_traces(\n fun, primitive, top_trace and top_trace.level,\n params_tuple, out_axes_transforms)\n tracers = map(top_trace.full_raise, args)\n outs = primitive.process(top_trace, fun, tracers, params)\n return map(full_lower, apply_todos(env_trace_todo(), outs))\n\n\nclass CallPrimitive(Primitive):\n multiple_results = True\n call_primitive = True\n\n def bind(self, fun, *args, **params):\n return call_bind(self, fun, *args, **params)\n\n def process(self, trace, fun, tracers, params):\n return trace.process_call(self, fun, tracers, params)\n\n def post_process(self, trace, out_tracers, params):\n return trace.post_process_call(self, out_tracers, params)\n\ndef call_impl(f: lu.WrappedFun, *args, **params):\n del params # params parameterize the call primitive, not the function\n with new_sublevel():\n return f.call_wrapped(*args)\n\ncall_p = CallPrimitive('call')\ncall = call_p.bind\ncall_p.def_impl(call_impl)\n\nnamed_call_p = CallPrimitive('named_call')\nnamed_call_p.def_impl(call_impl)\n\n# ------------------- Map -------------------\n\ndef mapped_aval(size: int, axis: int, aval: AbstractValue) -> AbstractValue:\n handler, _ = aval_mapping_handlers.get(type(aval), (None, None))\n if handler is not None:\n return handler(size, axis, aval)\n else:\n raise TypeError(f\"no mapping handler for {aval} of type {type(aval)}\")\n\ndef unmapped_aval(size: 
int, axis_name, axis: int, aval: AbstractValue) -> AbstractValue:\n _, handler = aval_mapping_handlers.get(type(aval), (None, None))\n if handler is not None:\n return handler(size, axis_name, axis, aval)\n else:\n raise TypeError(f\"no unmapping handler for {aval} of type {type(aval)}\")\n\ndef _map_unit(*_) -> AbstractUnit:\n return abstract_unit\n\ndef _map_shaped_array(size: int, axis: int, aval: ShapedArray) -> ShapedArray:\n assert aval.shape[axis] == size\n # TODO: Extend the named shape\n return ShapedArray(tuple_delete(aval.shape, axis), aval.dtype,\n named_shape=aval.named_shape)\n\ndef _unmap_shaped_array(size: int, axis_name, axis: int, aval: ShapedArray) -> ShapedArray:\n named_shape = dict(aval.named_shape)\n # TODO: Make this mandatory\n named_shape.pop(axis_name, None)\n return ShapedArray(tuple_insert(aval.shape, axis, size), aval.dtype,\n named_shape=named_shape)\n\nAvalMapHandlerPair = Tuple[Callable, Callable]\naval_mapping_handlers: Dict[Type, AvalMapHandlerPair] = {\n AbstractUnit: (_map_unit, _map_unit),\n ShapedArray: (_map_shaped_array, _unmap_shaped_array),\n ConcreteArray: (_map_shaped_array, _unmap_shaped_array),\n}\n\n\nclass MapPrimitive(Primitive):\n multiple_results = True\n map_primitive = True\n\n def bind(self, fun, *args, **params):\n assert len(params['in_axes']) == len(args)\n return call_bind(self, fun, *args, **params)\n\n def process(self, trace, fun, tracers, params):\n return trace.process_map(self, fun, tracers, params)\n\n def post_process(self, trace, out_tracers, params):\n return trace.post_process_map(self, out_tracers, params)\n\n@contextmanager\ndef extend_axis_env(axis_name: AxisName, size: int, tag: Any):\n frame = AxisEnvFrame(axis_name, size, tag)\n thread_local_state.trace_state.axis_env.append(frame)\n try:\n yield\n finally:\n thread_local_state.trace_state.axis_env.pop()\n\n@contextmanager\ndef extend_axis_env_nd(axes: Iterable[Tuple[AxisName, int]]):\n frames = [AxisEnvFrame(axis_name, size, None) for axis_name, size in axes]\n thread_local_state.trace_state.axis_env.extend(frames)\n try:\n yield\n finally:\n for _ in frames:\n thread_local_state.trace_state.axis_env.pop()\n\n\n# When a mapped function is given no axis name, we generate a name object based\n# on the id of the function object. Collisions aren't important because this\n# name can't be used in collectives, as user code never gets a ref to this\n# object. We don't want to use the function object itself because that might\n# persist references to the function object.\n# TODO(mattjj): revisit this unique axis name strategy\n@total_ordering\nclass _TempAxisName:\n\n def __init__(self, obj):\n self.id = id(obj)\n\n def __repr__(self):\n return f'<axis {hex(self.id)}>'\n\n def __hash__(self):\n return hash(self.id)\n\n def __eq__(self, other):\n return type(other) is _TempAxisName and self.id == other.id\n\n def __lt__(self, other):\n return type(other) is _TempAxisName and self.id < other.id\n\n\ndef axis_frame(axis_name):\n frames = thread_local_state.trace_state.axis_env\n for frame in reversed(frames):\n if frame.name == axis_name:\n return frame\n named_axes = [frame.name for frame in reversed(frames)\n if not isinstance(frame.name, _TempAxisName)]\n raise NameError(\n f'unbound axis name: {axis_name}. The following axis names (e.g. 
defined '\n f'by pmap) are available to collective operations: {named_axes}')\n\n\nParamDict = Dict[str, Any]\nAxisSubst = Callable[[AxisName], Tuple[AxisName, ...]]\n\nclass NameGatheringSubst:\n def __init__(self):\n self.axis_names = set()\n def __call__(self, axis_name):\n self.axis_names.add(axis_name)\n return (axis_name,)\n\ndef used_axis_names(primitive: Primitive, params: ParamDict) -> Set[AxisName]:\n subst = NameGatheringSubst()\n subst_axis_names(primitive, params, subst)\n return subst.axis_names\n\ndef subst_axis_names(primitive: Primitive, params: ParamDict, subst: AxisSubst, traverse: bool = True) -> ParamDict:\n if primitive in axis_substitution_rules:\n return axis_substitution_rules[primitive](params, subst, traverse)\n if not traverse:\n return params\n # Default implementation: substitute names in all jaxpr parameters\n if isinstance(primitive, MapPrimitive):\n def shadowed_subst(name):\n return (name,) if name == params['axis_name'] else subst(name)\n else:\n shadowed_subst = subst\n jaxpr_params = [(n, v) for n, v in params.items() if isinstance(v, (Jaxpr, ClosedJaxpr))]\n if not jaxpr_params:\n return params\n new_params = dict(params)\n for name, jaxpr in jaxpr_params:\n new_params[name] = subst_axis_names_jaxpr(jaxpr, shadowed_subst)\n return new_params\n\nclass DuplicateAxisNameError(Exception):\n def __init__(self, var):\n self.var = var\n self.eqn = None\n\ndef subst_axis_names_var(v: Var, subst: AxisSubst, var_map: Dict[Var, Var]) -> Var:\n # Var identity is load-bearing, so we can't have duplicates!\n if v is unitvar: return v\n if v is dropvar: return v\n assert v not in var_map\n if not hasattr(v.aval, 'named_shape'):\n var_map[v] = v\n return v\n names = tuple(it.chain.from_iterable(subst(name) for name in v.aval.named_shape))\n named_shape = {name: axis_frame(name).size for name in names}\n if len(named_shape) != len(names):\n raise DuplicateAxisNameError(v)\n new_v = Var(v.count, v.suffix, v.aval.update(named_shape=named_shape))\n var_map[v] = new_v\n return new_v\n\ndef subst_axis_names_eqn(eqn: JaxprEqn, subst: AxisSubst, var_map: Dict[Var, Var]) -> JaxprEqn:\n invars: List[Atom] = [v if isinstance(v, Literal) else var_map[v] for v in eqn.invars]\n try:\n outvars = [subst_axis_names_var(v, subst, var_map) for v in eqn.outvars]\n except DuplicateAxisNameError as e:\n e.eqn = eqn\n raise\n params = subst_axis_names(eqn.primitive, eqn.params, subst)\n return new_jaxpr_eqn(invars, outvars, eqn.primitive, params, eqn.source_info)\n\ndef do_subst_axis_names_jaxpr(jaxpr: Union[Jaxpr, ClosedJaxpr], subst: AxisSubst):\n consts = None\n if isinstance(jaxpr, ClosedJaxpr):\n consts = jaxpr.consts\n jaxpr = jaxpr.jaxpr\n var_map: Dict[Var, Var] = {unitvar: unitvar}\n invars = [subst_axis_names_var(v, subst, var_map) for v in jaxpr.invars]\n constvars = [subst_axis_names_var(v, subst, var_map) for v in jaxpr.constvars]\n eqns = [subst_axis_names_eqn(eqn, subst, var_map) for eqn in jaxpr.eqns]\n outvars: List[Atom] = [v if isinstance(v, Literal) else var_map[v] for v in jaxpr.outvars]\n new_jaxpr = Jaxpr(constvars, invars, outvars, eqns)\n if consts is not None:\n return ClosedJaxpr(new_jaxpr, consts)\n return new_jaxpr\n\n@cache()\ndef used_axis_names_jaxpr(jaxpr: Union[Jaxpr, ClosedJaxpr]):\n subst = NameGatheringSubst()\n do_subst_axis_names_jaxpr(jaxpr, subst)\n return frozenset(subst.axis_names)\n\ndef subst_axis_names_jaxpr(jaxpr: Union[Jaxpr, ClosedJaxpr], subst: AxisSubst):\n if isinstance(subst, NameGatheringSubst): # This is a common case, so we optimize 
it!\n subst.axis_names |= used_axis_names_jaxpr(jaxpr)\n return jaxpr\n return do_subst_axis_names_jaxpr(jaxpr, subst)\n\n\naxis_substitution_rules: Dict[Primitive, Callable[[ParamDict, AxisSubst, bool], ParamDict]] = {}\n\n# ------------------- AxisPrimitive -------------------\n# Primitives that store axis names in params and want those axis names to\n# participate in dispatch should subclass AxisPrimitive.\n\nclass AxisPrimitive(Primitive):\n _dispatch_on_params = True\n\n# ------------------- Jaxpr checking -------------------\n\ndef typecheck(aval: AbstractValue, x) -> bool:\n return typecompat(aval, get_aval(x))\n\ndef typecompat(aval_ref: AbstractValue, aval: AbstractValue) -> bool:\n \"\"\"Determine whether `aval` conforms to `aval_ref`.\n\n Ignores weak_type and named_shape, other than to check that an axis name isn't\n used with different sizes.\n \"\"\"\n try:\n return typematch(aval_ref, lattice_join(aval_ref, aval))\n except TypeError:\n return False\n\ndef typematch(aval1: AbstractValue, aval2: AbstractValue) -> bool:\n \"\"\"Determine whether `aval1` and `aval2` are equivalent.\n\n Ignores weak_type and named_shape, other than to check that an axis name isn't\n used with different sizes.\n \"\"\"\n if aval1 == aval2: return True\n # unequal avals may still represent the same type, because type is represented\n # by avals at the shaped level, and because weak type tags and (for now) named\n # shape components aren't considered part of the type\n if isinstance(aval1, ShapedArray) and isinstance(aval2, ShapedArray):\n # a bonus check for whether any named axes have inconsistent sizes\n join_named_shapes(aval1.named_shape, aval2.named_shape)\n return (raise_to_shaped(aval1, weak_type=False).strip_named_shape() ==\n raise_to_shaped(aval2, weak_type=False).strip_named_shape())\n\nclass JaxprTypeError(TypeError): pass\n\ndef typecheck_assert(pred, msg):\n if not pred:\n raise JaxprTypeError(msg)\n\ncustom_typechecks: Dict[Primitive, Callable] = {}\n\ndef check_jaxpr(jaxpr: Jaxpr):\n \"\"\"Checks well-formedness of a jaxpr.\n\n Specifically, check that:\n - variables that are read are bound beforehand\n - variables are typed equally throughout a jaxpr\n - variable type annotations are compatible with their binding expression\n\n Raises `JaxprTypeError` if `jaxpr` is determined invalid. 
Returns `None`\n otherwise.\n \"\"\"\n try:\n _check_jaxpr(jaxpr, [v.aval for v in jaxpr.invars])\n except JaxprTypeError as e:\n if len(e.args) == 2:\n msg, eqn_idx = e.args\n jaxpr_str = str(pp_jaxpr_eqn_range(jaxpr, eqn_idx - 10, eqn_idx + 10))\n else:\n msg, = e.args\n jaxpr_str = str(pp_jaxpr_eqn_range(jaxpr, 0, 20))\n msg = \"\\n\\n\".join([msg, \"while checking jaxpr:\", jaxpr_str])\n raise JaxprTypeError(msg) from None\n\ndef _check_jaxpr(jaxpr: Jaxpr, in_avals: Sequence[AbstractValue]):\n\n def read(v: Atom) -> AbstractValue:\n if isinstance(v, Literal):\n return raise_to_shaped(get_aval(v.val))\n else:\n typecheck_assert(v in env, f\"Variable '{v}' not defined\")\n return env[v]\n\n def write(v: Var, a: AbstractValue) -> None:\n typecheck_assert(v not in env, f\"Variable '{v}' already bound\")\n if v is not dropvar:\n typecheck_assert(typecompat(v.aval, a),\n f\"Variable '{v}' inconsistently typed as {a}, \"\n f\"bound as {v.aval}\")\n env[v] = a\n\n env : Dict[Var, AbstractValue] = {}\n\n write(unitvar, abstract_unit)\n map(write, jaxpr.constvars, [v.aval for v in jaxpr.constvars])\n map(write, jaxpr.invars, in_avals)\n\n for eqn_idx, eqn in enumerate(jaxpr.eqns):\n prim = eqn.primitive\n try:\n in_avals = map(read, eqn.invars)\n typecheck_assert(all(not isinstance(ina, ConcreteArray) for ina in in_avals),\n \"Equation given ConcreteArray type inputs\")\n if prim in custom_typechecks:\n out_avals = custom_typechecks[prim](*in_avals, **eqn.params)\n if out_avals is None:\n out_avals = [v.aval for v in eqn.outvars]\n elif prim.call_primitive:\n out_avals = check_call(prim, in_avals, eqn.params)\n elif prim.map_primitive:\n out_avals = check_map(prim, in_avals, eqn.params)\n else:\n out_avals = check_eqn(prim, in_avals, eqn.params)\n map(write, eqn.outvars, out_avals)\n except JaxprTypeError as e:\n msg, = e.args\n src = source_info_util.summarize(eqn.source_info)\n msg = \"\\n\\n\".join([msg, \"in equation:\", str(pp_eqn(eqn).indent(2)),\n f\"from source: {src}\"])\n raise JaxprTypeError(msg, eqn_idx) from None\n\n map(read, jaxpr.outvars)\n\ndef check_eqn(prim, in_avals, params):\n for jaxpr in jaxprs_in_params(params):\n check_jaxpr(jaxpr)\n\n out_avals = prim.abstract_eval(*in_avals, **params)\n if not prim.multiple_results:\n out_avals = [out_avals]\n return out_avals\n\ndef check_call(prim, in_avals, params):\n typecheck_assert(\"call_jaxpr\" in params,\n f\"Call primitive {prim} missing 'call_jaxpr' parameter\")\n call_jaxpr = params[\"call_jaxpr\"]\n\n # These checks also happen in recursive call, but give better errors here.\n typecheck_assert(len(in_avals) == len(call_jaxpr.invars),\n f\"Call primitive {prim} with {len(call_jaxpr.invars)} \"\n f\"operands cannot call jaxpr with {len(call_jaxpr.invars)} \"\n f\"inputs\")\n binder_avals = [v.aval for v in call_jaxpr.invars]\n for binder_aval, in_aval in zip(binder_avals, in_avals):\n typecheck_assert(typecompat(binder_aval, in_aval),\n f\"Call primitive {prim} passes operand {in_aval} \"\n f\"to jaxpr expecting {binder_aval}\")\n\n _check_jaxpr(call_jaxpr, in_avals)\n\n out_avals = [v.aval for v in call_jaxpr.outvars]\n return out_avals\n\ndef check_map(prim, in_avals, params):\n typecheck_assert(\"call_jaxpr\" in params,\n f\"Map primitive {prim} missing 'call_jaxpr' parameter\")\n call_jaxpr = params[\"call_jaxpr\"]\n typecheck_assert(\"axis_size\" in params,\n f\"Map primitive {prim} missing 'axis_size' parameter\")\n axis_size = params[\"axis_size\"]\n typecheck_assert(\"axis_name\" in params,\n f\"Map primitive {prim} 
missing 'axis_name' parameter\")\n axis_name = params[\"axis_name\"]\n typecheck_assert(\"in_axes\" in params,\n f\"Map primitive {prim} missing 'in_axes' parameter\")\n in_axes = params[\"in_axes\"]\n typecheck_assert(\"out_axes\" in params,\n f\"Map primitive {prim} missing 'out_axes' parameter\")\n out_axes = params[\"out_axes\"]\n\n binder_avals = [unmapped_aval(axis_size, axis_name, in_axis, v.aval)\n if in_axis is not None else v.aval\n for v, in_axis in zip(call_jaxpr.invars, in_axes)]\n for binder_aval, in_aval in zip(binder_avals, in_avals):\n typecheck_assert(typecompat(binder_aval, in_aval),\n f\"Call primitive {prim} passes operand {in_aval} \"\n f\"to jaxpr expecting {binder_aval}\")\n\n mapped_avals = [mapped_aval(axis_size, in_axis, aval)\n if in_axis is not None else aval\n for aval, in_axis in zip(in_avals, in_axes)]\n with extend_axis_env(params['axis_name'], axis_size, None):\n _check_jaxpr(call_jaxpr, mapped_avals)\n\n mapped_out_avals = [v.aval for v in call_jaxpr.outvars]\n out_avals = [unmapped_aval(axis_size, axis_name, out_axis, aval) if out_axis is not None else aval\n for aval, out_axis in zip(mapped_out_avals, out_axes)]\n return out_avals\n\n\n# ------------------- Jaxpr printed representation -------------------\n\ndef pp_vars(vs: Sequence[Any], print_shapes: bool = False) -> str:\n if print_shapes:\n return ' '.join(f'{v}:{v.aval.str_short()}' for v in vs)\n else:\n return ' '.join(map(str, vs))\n\ndef pp_eqn_compact(primitive_name: str, params: Dict) -> PrettyPrint:\n filtered_params = {k: v for k, v in params.items()\n if (k != 'branches' and\n not isinstance(v, (Jaxpr, ClosedJaxpr)))}\n return pp(primitive_name) >> pp_kv_pairs(sorted(filtered_params.items()))\n\ndef pp_eqn(eqn: JaxprEqn, print_shapes: bool = False) -> PrettyPrint:\n lhs = pp_vars(eqn.outvars, print_shapes)\n pp_lhs = pp(f'{lhs} =')\n pp_rhs = (pp(eqn.primitive.name) >>\n pp_kv_pairs(sorted(eqn.params.items())) >> pp(' ') >>\n pp(pp_vars(eqn.invars, print_shapes)))\n if len(lhs) <= 6 or print_shapes:\n return pp_lhs >> pp(' ') >> pp_rhs\n else:\n return pp_lhs + pp_rhs.indent(2)\n\ndef pp_eqns(eqns: Sequence[JaxprEqn],\n source_info: bool = False) -> Sequence[PrettyPrint]:\n pps = map(pp_eqn, eqns)\n if source_info:\n l = max((i + len(s) for x in pps for i, s in x.lines), default=None)\n if l is not None:\n return [p.annotate(l, source_info_util.summarize(e.source_info))\n for e, p in zip(eqns, pps)]\n return pps\n\ndef pp_jaxpr(jaxpr: Jaxpr, source_info: bool = False) -> PrettyPrint:\n pps = pp_eqns(jaxpr.eqns, source_info=source_info)\n str_outvars = str(tuple(jaxpr.outvars))\n return (pp('{{ lambda {} ; {}.'.format(pp_vars(jaxpr.constvars),\n pp_vars(jaxpr.invars))) +\n ((pp('let ') >> vcat(pps))\n + pp('in {} }}'.format(str_outvars))).indent(2))\n\ndef pp_jaxpr_eqn_range(jaxpr: Jaxpr, lo: int, hi: int,\n source_info: bool = False) -> PrettyPrint:\n lo = max(lo, 0)\n hi = max(lo, min(hi, len(jaxpr.eqns)))\n eqns = jaxpr.eqns[lo:hi]\n pps = []\n if len(eqns) == 0 and len(jaxpr.eqns) != 0:\n pps.append(pp('...'))\n else:\n if lo != 0:\n pps.append(pp('...'))\n pps.extend(pp_eqns(eqns, source_info=source_info))\n if hi != len(jaxpr.eqns):\n pps.append(pp('...'))\n str_outvars = str(tuple(jaxpr.outvars))\n return (pp('{{ lambda {} ; {}.'.format(pp_vars(jaxpr.constvars),\n pp_vars(jaxpr.invars))) +\n ((pp('let ') >> vcat(pps))\n + pp('in {} }}'.format(str_outvars))).indent(2))\n\ndef pp_jaxprs(jaxprs) -> PrettyPrint:\n jaxprs = [j.jaxpr if isinstance(j, ClosedJaxpr) else j for j in jaxprs]\n 
return pp('( ') >> vcat(map(pp_jaxpr, jaxprs)) >> pp(' )')\n\ndef pp_kv_pair(k, v):\n if type(v) is tuple and all(isinstance(j, (Jaxpr, ClosedJaxpr)) for j in v):\n pp_v = pp_jaxprs(v)\n else:\n pp_v = pp(v)\n return pp(f'{k}=') >> pp_v\n\ndef pp_kv_pairs(kv_pairs):\n if kv_pairs:\n return pp('[ ') >> vcat([pp_kv_pair(k, v) for k, v in kv_pairs]) >> pp(' ]')\n else:\n return pp('')\n" ]
[ [ "numpy.result_type", "numpy.prod", "numpy.dtype", "numpy.shape" ] ]
Jean1dev/MachineLearning_dataScience
[ "a77cb60b29dd37ee039082e7318fca61322ffecb" ]
[ "scripts/naive-bayes-census.py" ]
[ "import pandas as pd\n\nbase = pd.read_csv('census.csv')\n\nprevisores = base.iloc[:, 0:14].values\nclasse = base.iloc[:, 14].values\n \nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_previsores = LabelEncoder()\nprevisores[:, 1] = labelencoder_previsores.fit_transform(previsores[:, 1])\nprevisores[:, 3] = labelencoder_previsores.fit_transform(previsores[:, 3])\nprevisores[:, 5] = labelencoder_previsores.fit_transform(previsores[:, 5])\nprevisores[:, 6] = labelencoder_previsores.fit_transform(previsores[:, 6])\nprevisores[:, 7] = labelencoder_previsores.fit_transform(previsores[:, 7])\nprevisores[:, 8] = labelencoder_previsores.fit_transform(previsores[:, 8])\nprevisores[:, 9] = labelencoder_previsores.fit_transform(previsores[:, 9])\nprevisores[:, 13] = labelencoder_previsores.fit_transform(previsores[:, 13])\n\nonehotencoder = OneHotEncoder(categorical_features = [1,3,5,6,7,8,9,13])\nprevisores = onehotencoder.fit_transform(previsores).toarray()\n\nlabelencoder_classe = LabelEncoder()\nclasse = labelencoder_classe.fit_transform(classe)\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nprevisores = scaler.fit_transform(previsores)\n\nfrom sklearn.cross_validation import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=0)\n\nfrom sklearn.naive_bayes import GaussianNB\nclassificador = GaussianNB()\nclassificador.fit(previsores_treinamento, classe_treinamento)\nprevisoes = classificador.predict(previsores_teste)\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nprecisao = accuracy_score(classe_teste, previsoes)\nmatriz = confusion_matrix(classe_teste, previsoes)" ]
[ [ "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.confusion_matrix", "sklearn.preprocessing.StandardScaler", "sklearn.naive_bayes.GaussianNB", "sklearn.metrics.accuracy_score", "pandas.read_csv", "sklearn.cross_validation.train_test_split", "sklearn.preprocessing.OneHotEncoder" ] ]
RicoFerdian/smallopticalsorter
[ "835d5b696b586e532b35fff2c9fcd95b648a6442" ]
[ "utils/prepare-beans.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"create-bean-images\n\nUsage:\n prepare-beans.py --resolution=<res> [--debug] <file>\n prepare-beans.py (-h | --help)\n prepare-beans.py --version\n\nOptions:\n -r <res>, --resolution=<res> Image file resolution in px/mm.\n -d, --debug Also write an image showing the applied thresholding and recognized objects.\n -h, --help Show this screen.\n --version Show version.\n\n\"\"\"\nimport os\nimport cv2\nimport numpy as np\nfrom docopt import docopt\n\n\n#### SECTION 1: INPUT\n\n# Command line arguments as prepared by Docopt.\n# Reference: https://github.com/docopt/docopt\narguments = docopt(__doc__, version='create-bean-images 0.1')\n# print(arguments)\n\nresolution = float(arguments['--resolution']) # px/mm\n\n# Output image width and height.\n# (Output images should cover a physical size of 14.35*14.35 mm always.)\nimg_target_size = int(14.35 * resolution)\n\nfilename = arguments['<file>']\nfilename_beans_prefix = os.path.splitext(filename)[0] + '.'\nfilename_debug_bw = os.path.splitext(filename)[0] + '.debug1.jpg'\nfilename_debug_rgb = os.path.splitext(filename)[0] + '.debug2.jpg'\n\n# Load the image.\nimg = cv2.imread(filename)\n\n# Determine the image dimensions.\nimg_height, img_width = img.shape[:2]\n# print(\"DEBUG: img_height = \", img_height, \", img_width = \", img_width)\n\n# Block size for OpenCV adaptive thresholding. (Must be an uneven number.)\nthresh_blocksize = int( max(img_height, img_width) * 0.25 )\nif thresh_blocksize % 2 == 0: thresh_blocksize += 1\n\n\n#### SECTION 2: IMAGE PROCESSING\n\n# Convert to grayscale.\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Smooth the image to avoid noises.\nimg_gray = cv2.medianBlur(img_gray, 5)\n\n# Apply adaptive threshold.\n# Reference: https://docs.opencv.org/3.4.0/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3\nimg_bw = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, thresh_blocksize, 0)\n\n# Find the contours.\n# Reference: https://docs.opencv.org/3.4.0/d3/dc0/group__imgproc__shape.html#ga17ed9f5d79ae97bd4c7cf18403e1689a\ncontours = cv2.findContours(img_bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]\n\n# Convert the image from gray to color mode, so we can draw in color on it later.\nimg_debug_bw = cv2.cvtColor(img_bw, cv2.COLOR_GRAY2BGR)\nimg_debug_rgb = img.copy()\n\n# For each contour, find the bounding box, draw it, save it.\nimg_num = 1\nfor cnt in contours:\n x,y,w,h = cv2.boundingRect(cnt)\n\n # Draw a green bounding box around the contour (in both original and b&w versions).\n cv2.rectangle(img_debug_bw, (x,y), (x+w, y+h), (0,255,0), 2)\n cv2.rectangle(img_debug_rgb, (x,y), (x+w, y+h), (0,255,0), 2)\n\n # Skip thresholding artifacts (anything smaller than 10 mm²).\n if (h*w < 10 * resolution * resolution): continue\n\n # Grow the bounding box to img_target_size * img_target_size (where possible).\n center_x = x + w//2\n center_y = y + h//2\n x = max(center_x - (img_target_size//2), 0)\n y = max(center_y - (img_target_size//2), 0)\n w = img_target_size\n if (x+w > img_width): w = img_width - x\n h = img_target_size\n if (y+h > img_height): h = img_height - y\n\n # Extract the bounding box content (\"region of interest\", hopefully a bean)\n roi = img[y:y+h, x:x+w]\n\n # Pad the bounding box to img_target_size * img_target_size, if needed.\n if h < img_target_size or w < img_target_size:\n # Make a 3-channel canvas in targeted size.\n roi_canvas = np.zeros([img_target_size, img_target_size, 3], dtype=np.uint8)\n # Fill image 
with white = (255,255,255).\n roi_canvas.fill(255)\n\n # Mount ROI input image centered on the canvas.\n h_offset = (img_target_size - h) // 2\n w_offset = (img_target_size - w) // 2\n roi_canvas[h_offset:h_offset+h, w_offset:w_offset+w] = roi\n\n roi = roi_canvas\n\n # Save the ROI as JPEG image. (Image format is chosen by extension. \".png\" also works.)\n # Reference: https://docs.opencv.org/3.4.0/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce\n cv2.imwrite(filename_beans_prefix + str(img_num).zfill(2) + \".jpg\", roi, [cv2.IMWRITE_JPEG_QUALITY, 98])\n img_num += 1\n\n# Save images for visual debugging (bounding boxes and thresholding).\nif arguments['--debug']:\n cv2.imwrite(filename_debug_bw, img_debug_bw)\n cv2.imwrite(filename_debug_rgb, img_debug_rgb)\n" ]
[ [ "numpy.zeros" ] ]
Ulian7/DeepCTR
[ "d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5" ]
[ "deepctr_torch/models/din.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Yuef Zhang\nReference:\n [1] Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068. (https://arxiv.org/pdf/1706.06978.pdf)\n\"\"\"\n\nfrom .basemodel import BaseModel\nfrom ..inputs import *\nfrom ..layers import *\nfrom ..layers.sequence import AttentionSequencePoolingLayer\nfrom config import C\nfrom . import causalD as utils\nimport torch.nn as nn\nimport torch\n# import .causalD as utils\n\n\n\nclass DIN(BaseModel):\n \"\"\"Instantiates the Deep Interest Network architecture.\n\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param history_feature_list: list,to indicate sequence sparse field\n :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net\n :param dnn_activation: Activation function to use in deep net\n :param att_hidden_size: list,list of positive integer , the layer number and units in each layer of attention net\n :param att_activation: Activation function to use in attention net\n :param att_weight_normalization: bool. Whether normalize the attention score of local activation unit.\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param init_std: float,to use as the initialize std of embedding vector\n self.CD_linear = nn.Linear(temp, 2 * self.z1_dim * self.z2_dim)\n :param seed: integer ,to use as random seed.\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A PyTorch model instance.\n\n \"\"\"\n\n def __init__(self, dnn_feature_columns, history_feature_list, dnn_use_bn=False,\n dnn_hidden_units=(256, 128), dnn_activation='relu', att_hidden_size=(64, 16),\n att_activation='Dice', att_weight_normalization=False, l2_reg_dnn=0.0,\n l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001,\n seed=1024, task='binary', device='cpu', teacher=False,flag_stage=None):\n super(DIN, self).__init__([], dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding,\n init_std=init_std, seed=seed, task=task, device=device)\n\n self.sparse_feature_columns = list(\n filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n self.varlen_sparse_feature_columns = list(\n filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n\n self.history_feature_list = history_feature_list\n\n self.history_feature_columns = []\n self.sparse_varlen_feature_columns = []\n self.history_fc_names = list(map(lambda x: \"hist_\" + x, history_feature_list))\n\n for fc in self.varlen_sparse_feature_columns:\n feature_name = fc.name\n if feature_name in self.history_fc_names:\n self.history_feature_columns.append(fc)\n else:\n self.sparse_varlen_feature_columns.append(fc)\n att_emb_dim = self._compute_interest_dim()\n############################################add attribute:self.att_score########################################\n self.attention = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size,\n embedding_dim=att_emb_dim,\n 
att_activation=att_activation,\n return_score=False,\n supports_masking=False,\n weight_normalization=att_weight_normalization)\n import pdb\n if C.frontdoor and not teacher:\n self.frontdoor_attn = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size,\n embedding_dim=128,\n att_activation=att_activation,\n return_score=False,\n supports_masking=False,\n weight_normalization=att_weight_normalization)\n\n \n\n temp=self.compute_input_dim(dnn_feature_columns) - C.attr_dim\n import pdb\n\n self.dnn = DNN(inputs_dim=temp,\n hidden_units=dnn_hidden_units,\n activation=dnn_activation,\n dropout_rate=dnn_dropout,\n l2_reg=l2_reg_dnn,\n use_bn=dnn_use_bn)\n self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)\n if C.CausalD:\n self.z1_dim = self.z2_dim = 8\n self.CD_linear = nn.Linear(temp, 2 * self.z1_dim * self.z2_dim)\n self.dag = utils.DagLayer(self.z1_dim, self.z1_dim)\n self.in_dim = self.z1_dim * self.z2_dim\n self.decoder = nn.Sequential(\n nn.Linear(self.in_dim, self.in_dim // 2),\n nn.Linear(self.in_dim // 2, 8),\n )\n\n elif C.CausalD_wo_dag:\n self.z1_dim = self.z2_dim = 8\n self.CD_linear = nn.Linear(temp, self.z1_dim * self.z2_dim)\n\n\n if C.CausalD or C.CausalD_wo_dag:\n self.gender_cls = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim // 2),\n nn.ELU(),\n nn.Linear(self.z2_dim // 2, 2),\n )\n self.age_reg = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim // 2),\n nn.ELU(),\n nn.Linear(self.z2_dim // 2, 1),\n )\n self.occu_cls = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim * 2),\n nn.ELU(),\n nn.Linear(self.z2_dim * 2, 21),\n )\n self.zip_cls = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim * 2),\n nn.ELU(),\n nn.Linear(self.z2_dim * 2, self.z2_dim * 4),\n nn.ELU(),\n nn.Linear(self.z2_dim * 4, 3439),\n )\n self.title_reg = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim * 2),\n nn.ELU(),\n nn.Linear(self.z2_dim * 2, self.z2_dim * 4),\n nn.ELU(),\n nn.Linear(self.z2_dim * 4, 512),\n )\n self.genres_cls = nn.Sequential(\n nn.Linear(self.z2_dim, self.z2_dim * 2),\n nn.ELU(),\n nn.Linear(self.z2_dim * 2, 18),\n )\n self.dnn = DNN(inputs_dim=self.z1_dim * self.z2_dim,\n hidden_units=dnn_hidden_units,\n activation=dnn_activation,\n dropout_rate=dnn_dropout,\n l2_reg=l2_reg_dnn,\n use_bn=dnn_use_bn)\n\n\n\n\n\n\n self.to(device)\n\n\n def forward(self, X,only_score=False, front_dic=None):\n import pdb\n _, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)\n\n # sequence pooling part\n query_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns,\n return_feat_list=self.history_feature_list, to_list=True)\n # target_item的embedding矩阵\n keys_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.history_feature_columns,\n return_feat_list=self.history_fc_names, to_list=True)\n dnn_input_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns,\n to_list=True)\n # 此时dnn_input_emb_list是由sparse_features组成的list矩阵\n sequence_embed_dict = varlen_embedding_lookup(X, self.embedding_dict, self.feature_index,\n self.sparse_varlen_feature_columns)\n\n sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, X, self.feature_index,\n self.sparse_varlen_feature_columns, self.device)\n # sequence_embed_list 是和hist_mid相同的可变长的序列特征(和hist_mid不同)\n dnn_input_emb_list += sequence_embed_list\n deep_input_emb = torch.cat(dnn_input_emb_list, dim=-1)\n\n # concatenate\n query_emb = 
torch.cat(query_emb_list, dim=-1) # [B, 1, E]\n keys_emb = torch.cat(keys_emb_list, dim=-1) # [B, T, E] : T the number of hist_item\n\n keys_length_feature_name = [feat.length_name for feat in self.varlen_sparse_feature_columns if\n feat.length_name is not None]\n keys_length = torch.squeeze(maxlen_lookup(X, self.feature_index, keys_length_feature_name), 1) # [B, 1]\n # keys_length表示hist_item的长度\n import pdb\n hist,att_score,keys = self.attention(query_emb, keys_emb, keys_length) # [B, 1, E],[B, 1, T]\n if C.frontdoor and front_dic is not None:\n group_att_score = torch.stack(front_dic['group_attn'], dim=1)\n group_out = torch.bmm(group_att_score, keys)\n group_deep_input_emb = torch.cat((deep_input_emb.repeat(1, C.group_num, 1), group_out), dim=-1)\n group_deep_input_emb = group_deep_input_emb.view(-1, group_deep_input_emb.size(-1))\n group_dnn_input = combined_dnn_input([group_deep_input_emb], dense_value_list)\n group_dnn_output = self.dnn(group_dnn_input)\n group_dnn_output = group_dnn_output.reshape(-1, C.group_num, group_dnn_output.size(-1))\n # group_dnn_logit = self.dnn_linear(group_dnn_output)\n\n # group_y_pred = self.out(group_dnn_logit)\n\n # deep part\n deep_input_emb = torch.cat((deep_input_emb, hist), dim=-1)\n deep_input_emb = deep_input_emb.view(deep_input_emb.size(0), -1)\n\n fd_pred = None\n dnn_input = combined_dnn_input([deep_input_emb], [])\n import pdb\n \n\n causalD_loss = 0.\n if C.CausalD:\n \n CD_dnn_input = self.CD_linear(dnn_input)\n lm, lv = utils.gaussian_parameters(CD_dnn_input)\n lm, lv = lm.reshape([-1, self.z1_dim,self.z2_dim]), torch.ones(lm.size()[0], self.z1_dim, self.z2_dim).to(lm.device)\n\n pm, pv = torch.zeros(lm.size()).to(lm.device), torch.ones(lv.size()).to(lm.device)\n fdim = self.z1_dim * self.z2_dim\n kl_loss = utils.kl_normal(lm.view(-1, fdim), lv.view(-1, fdim), pm.view(-1, fdim), pv.view(-1, fdim))\n kl_loss = torch.sum(kl_loss)\n\n\n \n qm, qv = self.dag.calculate_dag(lm, torch.ones(lm.size()[0], self.z1_dim, self.z2_dim).to(lm.device))\n qm, qv = qm.reshape([-1, self.z1_dim,self.z2_dim]), qv\n\n attr_loss = 0.\n dense_value = dense_value_list[0]\n # 'gender','age','occupation','zip', 'title','genres', 'title_vec', 'genres_vec'\n gender = dense_value[:,0].type(torch.LongTensor).to(qm.device)\n g_logits = self.gender_cls(qm[:,0,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(g_logits, gender)\n\n age = dense_value[:,1].to(qm.device) / C.max_age\n a_logits = self.age_reg(qm[:,1,:])\n attr_loss += nn.MSELoss(reduction='sum')(a_logits.squeeze(), age.squeeze())\n\n occu = dense_value[:,2].type(torch.LongTensor).to(qm.device)\n o_logits = self.occu_cls(qm[:,2,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(o_logits, occu)\n\n zip_c = dense_value[:,3].type(torch.LongTensor).to(qm.device)\n z_logits = self.zip_cls(qm[:,3,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(z_logits, zip_c)\n import pdb\n\n title_vec = dense_value[:, 4:516]\n title_vec_pred = self.title_reg(qm[:, 4, :])\n attr_loss += nn.MSELoss(reduction='sum')(title_vec_pred.squeeze(), title_vec.squeeze())\n\n import pdb\n \n genres_vec = dense_value[:, -18:]\n ge_logits = nn.Sigmoid()(self.genres_cls(qm[:, 5, :]))\n attr_loss += nn.BCELoss(reduction='sum')(ge_logits, genres_vec)\n \n\n z = utils.conditional_sample_gaussian(qm, qv * C.lambdav)\n z = z.reshape(z.size(0), -1)\n decoded = self.decoder(z)\n decoded_loss = torch.nn.MSELoss(reduction='sum')(decoded.squeeze(), query_emb.squeeze())\n\n\n dag_A = self.dag.A\n acyclicity_loss = utils._h_A(dag_A, 
dag_A.size(0))\n # CausalVAE loss func\n acyclicity_loss = acyclicity_loss * C.acy1_lambda + acyclicity_loss * acyclicity_loss * C.acy2_lambda\n\n\n dnn_input = qm.reshape(qm.size(0), -1)\n \n causalD_loss = kl_loss * C.kl_lambda + attr_loss * C.attr_lambda + decoded_loss * C.decoded_lambda + acyclicity_loss\n # causalD_loss = kl_loss + decoded_loss\n elif C.CausalD_wo_dag:\n CD_dnn_input = self.CD_linear(dnn_input)\n qm = CD_dnn_input.reshape([-1, self.z1_dim, self.z2_dim])\n \n attr_loss = 0.\n dense_value = dense_value_list[0]\n # 'gender','age','occupation','zip', 'title','genres', 'title_vec', 'genres_vec'\n gender = dense_value[:,0].type(torch.LongTensor).to(qm.device)\n g_logits = self.gender_cls(qm[:,0,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(g_logits, gender)\n\n age = dense_value[:,1].to(qm.device) / C.max_age\n a_logits = self.age_reg(qm[:,1,:])\n attr_loss += nn.MSELoss(reduction='sum')(a_logits.squeeze(), age.squeeze())\n\n occu = dense_value[:,2].type(torch.LongTensor).to(qm.device)\n o_logits = self.occu_cls(qm[:,2,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(o_logits, occu)\n\n zip_c = dense_value[:,3].type(torch.LongTensor).to(qm.device)\n z_logits = self.zip_cls(qm[:,3,:])\n attr_loss += nn.CrossEntropyLoss(reduction='sum')(z_logits, zip_c)\n import pdb\n\n title_vec = dense_value[:, 4:516]\n title_vec_pred = self.title_reg(qm[:, 4, :])\n attr_loss += nn.MSELoss(reduction='sum')(title_vec_pred.squeeze(), title_vec.squeeze())\n\n import pdb\n \n genres_vec = dense_value[:, -18:]\n ge_logits = nn.Sigmoid()(self.genres_cls(qm[:, 5, :]))\n attr_loss += nn.BCELoss(reduction='sum')(ge_logits, genres_vec)\n\n causalD_loss = attr_loss * C.attr_lambda\n dnn_input = qm.reshape(qm.size(0), -1)\n\n\n\n\n\n\n\n dnn_output = self.dnn(dnn_input)\n # if(only_score== True):\n # return hist,att_score, None,dnn_output\n\n\n\n\n if C.frontdoor and front_dic is not None:\n group_dnn_output = group_dnn_output.mean(0).unsqueeze(0).repeat(dnn_output.size(0), 1, 1)\n keys_length = torch.ones(dnn_output.size(0)).to(dnn_input.device) * 10\n fd_hist, fd_att_score, fd_keys = self.frontdoor_attn(dnn_output.unsqueeze(1), group_dnn_output, keys_length)\n fd_pred = self.out(self.dnn_linear(fd_hist))\n\n\n \n dnn_logit = self.dnn_linear(dnn_output)\n\n y_pred = self.out(dnn_logit)\n\n return y_pred,att_score, fd_pred,dnn_output, causalD_loss\n\n def _compute_interest_dim(self):\n interest_dim = 0\n for feat in self.sparse_feature_columns:\n if feat.name in self.history_feature_list:\n interest_dim += feat.embedding_dim\n return interest_dim\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.stack", "torch.nn.MSELoss", "torch.nn.ELU", "torch.nn.Sigmoid", "torch.bmm", "torch.nn.BCELoss", "torch.nn.CrossEntropyLoss", "torch.sum" ] ]
TensorMSA/hoyai_docker
[ "12f0041e6306d8a6421585a4b51666bad30be442" ]
[ "skp_edu_docker/code/third_party/yolo/yolo/net/yolo_tiny_net.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport re\n\nfrom third_party.yolo.yolo.net.net import Net\n\nclass YoloTinyNet(Net):\n\n def __init__(self, common_params, net_params, test=False):\n \"\"\"\n common params: a params dict\n net_params : a params dict\n \"\"\"\n super(YoloTinyNet, self).__init__(common_params, net_params)\n #process params\n self.image_size = int(common_params['image_size'])\n self.num_classes = int(common_params['num_classes'])\n self.cell_size = int(net_params['cell_size'])\n self.boxes_per_cell = int(net_params['boxes_per_cell'])\n self.batch_size = int(common_params['batch_size'])\n self.weight_decay = float(net_params['weight_decay'])\n\n if not test:\n self.object_scale = float(net_params['object_scale'])\n self.noobject_scale = float(net_params['noobject_scale'])\n self.class_scale = float(net_params['class_scale'])\n self.coord_scale = float(net_params['coord_scale'])\n\n def inference(self, images):\n \"\"\"Build the yolo model\n\n Args:\n images: 4-D tensor [batch_size, image_height, image_width, channels]\n Returns:\n predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]\n \"\"\"\n conv_num = 1\n\n temp_conv = self.conv2d('conv' + str(conv_num), images, [3, 3, 3, 16], stride=1)\n conv_num += 1\n\n temp_pool = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 16, 32], stride=1)\n conv_num += 1\n\n temp_pool = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 32, 64], stride=1)\n conv_num += 1\n\n temp_conv = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 64, 128], stride=1)\n conv_num += 1\n\n temp_conv = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)\n conv_num += 1\n\n temp_conv = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)\n conv_num += 1\n\n temp_conv = self.max_pool(temp_conv, [2, 2], 2)\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)\n conv_num += 1\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)\n conv_num += 1\n\n temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)\n conv_num += 1\n\n temp_conv = tf.transpose(temp_conv, (0, 3, 1, 2))\n\n #Fully connected layer\n local1 = self.local('local1', temp_conv, self.cell_size * self.cell_size * 1024, 256)\n\n local2 = self.local('local2', local1, 256, 4096)\n\n local3 = self.local('local3', local2, 4096, self.cell_size * self.cell_size * (self.num_classes + self.boxes_per_cell * 5), leaky=False, pretrain=False, train=True)\n\n n1 = self.cell_size * self.cell_size * self.num_classes\n\n n2 = n1 + self.cell_size * self.cell_size * self.boxes_per_cell\n\n class_probs = tf.reshape(local3[:, 0:n1], (-1, self.cell_size, self.cell_size, self.num_classes))\n scales = tf.reshape(local3[:, n1:n2], (-1, self.cell_size, self.cell_size, self.boxes_per_cell))\n boxes = tf.reshape(local3[:, n2:], (-1, self.cell_size, self.cell_size, self.boxes_per_cell * 4))\n\n local3 = tf.concat([class_probs, scales, boxes], 3)\n\n predicts = local3\n\n return predicts\n\n def iou(self, boxes1, boxes2):\n \"\"\"calculate ious\n 
Args:\n boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)\n boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)\n Return:\n iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]\n \"\"\"\n boxes1 = tf.stack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,\n boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])\n boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])\n boxes2 = tf.stack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,\n boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])\n\n #calculate the left up point\n lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])\n rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])\n\n #intersection\n intersection = rd - lu\n\n inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]\n\n mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)\n\n inter_square = mask * inter_square\n\n #calculate the boxs1 square and boxs2 square\n square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])\n square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])\n\n return inter_square/(square1 + square2 - inter_square + 1e-6)\n\n def cond1(self, num, object_num, loss, predict, label, nilboy):\n \"\"\"\n if num < object_num\n \"\"\"\n return num < object_num\n\n\n def body1(self, num, object_num, loss, predict, labels, nilboy):\n \"\"\"\n calculate loss\n Args:\n predict: 3-D tensor [cell_size, cell_size, 5 * boxes_per_cell]\n labels : [max_objects, 5] (x_center, y_center, w, h, class)\n \"\"\"\n label = labels[num:num+1, :]\n label = tf.reshape(label, [-1])\n\n #calculate objects tensor [CELL_SIZE, CELL_SIZE]\n min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)\n max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)\n\n min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)\n max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)\n\n min_x = tf.floor(min_x)\n min_y = tf.floor(min_y)\n\n max_x = tf.ceil(max_x)\n max_y = tf.ceil(max_y)\n\n temp = tf.cast(tf.stack([max_y - min_y, max_x - min_x]), dtype=tf.int32)\n objects = tf.ones(temp, tf.float32)\n\n temp = tf.cast(tf.stack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)\n temp = tf.reshape(temp, (2, 2))\n objects = tf.pad(objects, temp, \"CONSTANT\")\n\n #calculate objects tensor [CELL_SIZE, CELL_SIZE]\n #calculate responsible tensor [CELL_SIZE, CELL_SIZE]\n center_x = label[0] / (self.image_size / self.cell_size)\n center_x = tf.floor(center_x)\n\n center_y = label[1] / (self.image_size / self.cell_size)\n center_y = tf.floor(center_y)\n\n response = tf.ones([1, 1], tf.float32)\n\n temp = tf.cast(tf.stack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size -center_x - 1]), tf.int32)\n temp = tf.reshape(temp, (2, 2))\n response = tf.pad(response, temp, \"CONSTANT\")\n #objects = response\n\n #calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]\n predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]\n\n\n predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])\n\n predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]\n\n base_boxes = np.zeros([self.cell_size, self.cell_size, 4])\n\n for y in range(self.cell_size):\n for x in 
range(self.cell_size):\n #nilboy\n base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]\n base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])\n\n predict_boxes = base_boxes + predict_boxes\n\n iou_predict_truth = self.iou(predict_boxes, label[0:4])\n #calculate C [cell_size, cell_size, boxes_per_cell]\n C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])\n\n #calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]\n I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))\n\n max_I = tf.reduce_max(I, 2, keep_dims=True)\n\n I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))\n\n #calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]\n no_I = tf.ones_like(I, dtype=tf.float32) - I\n\n\n p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]\n\n #calculate truth x,y,sqrt_w,sqrt_h 0-D\n x = label[0]\n y = label[1]\n\n sqrt_w = tf.sqrt(tf.abs(label[2]))\n sqrt_h = tf.sqrt(tf.abs(label[3]))\n #sqrt_w = tf.abs(label[2])\n #sqrt_h = tf.abs(label[3])\n\n #calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]\n p_x = predict_boxes[:, :, :, 0]\n p_y = predict_boxes[:, :, :, 1]\n\n #p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)\n #p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)\n #p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))\n #p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))\n #p_sqrt_w = predict_boxes[:, :, :, 2]\n #p_sqrt_h = predict_boxes[:, :, :, 3]\n p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))\n p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))\n #calculate truth p 1-D tensor [NUM_CLASSES]\n P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)\n\n #calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]\n p_P = predict[:, :, 0:self.num_classes]\n\n #class_loss\n class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale\n #class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale\n\n #object_loss\n object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale\n #object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale\n\n #noobject_loss\n #noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale\n noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale\n\n #coord_loss\n coord_loss = (tf.nn.l2_loss(I * (p_x - x)/(self.image_size/self.cell_size)) +\n tf.nn.l2_loss(I * (p_y - y)/(self.image_size/self.cell_size)) +\n tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w))/ self.image_size +\n tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))/self.image_size) * self.coord_scale\n\n nilboy = I\n\n return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy\n\n\n\n def loss(self, predicts, labels, objects_num):\n \"\"\"Add Loss to all the trainable variables\n\n Args:\n predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]\n ===> (num_classes, boxes_per_cell, 4 * 
boxes_per_cell)\n labels : 3-D tensor of [batch_size, max_objects, 5]\n objects_num: 1-D tensor [batch_size]\n \"\"\"\n class_loss = tf.constant(0, tf.float32)\n object_loss = tf.constant(0, tf.float32)\n noobject_loss = tf.constant(0, tf.float32)\n coord_loss = tf.constant(0, tf.float32)\n loss = [0, 0, 0, 0]\n for i in range(self.batch_size):\n predict = predicts[i, :, :, :]\n label = labels[i, :, :]\n object_num = objects_num[i]\n nilboy = tf.ones([7,7,2])\n tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])\n for j in range(4):\n loss[j] = loss[j] + tuple_results[2][j]\n nilboy = tuple_results[5]\n\n tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)\n\n tf.summary.scalar('class_loss', loss[0]/self.batch_size)\n tf.summary.scalar('object_loss', loss[1]/self.batch_size)\n tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)\n tf.summary.scalar('coord_loss', loss[3]/self.batch_size)\n tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )\n\n return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy\n" ]
[ [ "tensorflow.ones", "tensorflow.ones_like", "tensorflow.reshape", "numpy.resize", "tensorflow.stack", "tensorflow.ceil", "tensorflow.add_to_collection", "tensorflow.cast", "tensorflow.concat", "tensorflow.transpose", "tensorflow.constant", "tensorflow.pad", "tensorflow.get_collection", "tensorflow.floor", "tensorflow.abs", "tensorflow.minimum", "numpy.zeros", "tensorflow.summary.scalar", "tensorflow.nn.l2_loss", "tensorflow.reduce_max", "tensorflow.maximum" ] ]
seanxcwang/TRTorch
[ "76ba62a0d5a258219bedee507e7678e9c5d09763" ]
[ "tests/modules/hub.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport timm\n\ntorch.hub._validate_not_a_forked_repo = lambda a, b, c: True\n\nmodels = {\n \"alexnet\": {\n \"model\": models.alexnet(pretrained=True),\n \"path\": \"both\"\n },\n \"vgg16\": {\n \"model\": models.vgg16(pretrained=True),\n \"path\": \"both\"\n },\n \"squeezenet\": {\n \"model\": models.squeezenet1_0(pretrained=True),\n \"path\": \"both\"\n },\n \"densenet\": {\n \"model\": models.densenet161(pretrained=True),\n \"path\": \"both\"\n },\n \"inception_v3\": {\n \"model\": models.inception_v3(pretrained=True),\n \"path\": \"both\"\n },\n #\"googlenet\": models.googlenet(pretrained=True),\n \"shufflenet\": {\n \"model\": models.shufflenet_v2_x1_0(pretrained=True),\n \"path\": \"both\"\n },\n \"mobilenet_v2\": {\n \"model\": models.mobilenet_v2(pretrained=True),\n \"path\": \"both\"\n },\n \"resnext50_32x4d\": {\n \"model\": models.resnext50_32x4d(pretrained=True),\n \"path\": \"both\"\n },\n \"wideresnet50_2\": {\n \"model\": models.wide_resnet50_2(pretrained=True),\n \"path\": \"both\"\n },\n \"mnasnet\": {\n \"model\": models.mnasnet1_0(pretrained=True),\n \"path\": \"both\"\n },\n \"resnet18\": {\n \"model\": torch.hub.load('pytorch/vision:v0.9.0', 'resnet18', pretrained=True),\n \"path\": \"both\"\n },\n \"resnet50\": {\n \"model\": torch.hub.load('pytorch/vision:v0.9.0', 'resnet50', pretrained=True),\n \"path\": \"both\"\n },\n \"ssd\": {\n \"model\": torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=\"fp32\"),\n \"path\": \"trace\"\n },\n \"efficientnet_b0\": {\n \"model\": timm.create_model('efficientnet_b0', pretrained=True),\n \"path\": \"script\"\n },\n \"vit\": {\n \"model\": timm.create_model('vit_base_patch16_224', pretrained=True),\n \"path\": \"script\"\n }\n}\n\n# Download sample models\nfor n, m in models.items():\n print(\"Downloading {}\".format(n))\n m[\"model\"] = m[\"model\"].eval().cuda()\n x = torch.ones((1, 3, 300, 300)).cuda()\n if m[\"path\"] == \"both\" or m[\"path\"] == \"trace\":\n trace_model = torch.jit.trace(m[\"model\"], [x])\n torch.jit.save(trace_model, n + '_traced.jit.pt')\n if m[\"path\"] == \"both\" or m[\"path\"] == \"script\":\n script_model = torch.jit.script(m[\"model\"])\n torch.jit.save(script_model, n + '_scripted.jit.pt')\n\n\n# Sample Pool Model (for testing plugin serialization)\nclass Pool(nn.Module):\n\n def __init__(self):\n super(Pool, self).__init__()\n\n def forward(self, x):\n return F.adaptive_avg_pool2d(x, (5, 5))\n\n\nmodel = Pool().eval().cuda()\nx = torch.ones([1, 3, 10, 10]).cuda()\n\ntrace_model = torch.jit.trace(model, x)\ntorch.jit.save(trace_model, \"pooling_traced.jit.pt\")\n\n\n# Sample Nested Module (for module-level fallback testing)\nclass ModuleFallbackSub(nn.Module):\n\n def __init__(self):\n super(ModuleFallbackSub, self).__init__()\n self.conv = nn.Conv2d(1, 3, 3)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.conv(x))\n\n\nclass ModuleFallbackMain(nn.Module):\n\n def __init__(self):\n super(ModuleFallbackMain, self).__init__()\n self.layer1 = ModuleFallbackSub()\n self.conv = nn.Conv2d(3, 6, 3)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.conv(self.layer1(x)))\n\n\nmodule_fallback_model = ModuleFallbackMain().eval().cuda()\nmodule_fallback_script_model = torch.jit.script(module_fallback_model)\ntorch.jit.save(module_fallback_script_model, \"module_fallback_scripted.jit.pt\")\n\n\n# Sample Conditional Model 
(for testing partitioning and fallback in conditionals)\nclass FallbackIf(torch.nn.Module):\n\n def __init__(self):\n super(FallbackIf, self).__init__()\n self.relu1 = torch.nn.ReLU()\n self.conv1 = torch.nn.Conv2d(3, 32, 3, 1, 1)\n self.log_sig = torch.nn.LogSigmoid()\n self.conv2 = torch.nn.Conv2d(32, 32, 3, 1, 1)\n self.conv3 = torch.nn.Conv2d(32, 3, 3, 1, 1)\n\n def forward(self, x):\n x = self.relu1(x)\n x_first = x[0][0][0][0].item()\n if x_first > 0:\n x = self.conv1(x)\n x1 = self.log_sig(x)\n x2 = self.conv2(x)\n x = self.conv3(x1 + x2)\n else:\n x = self.log_sig(x)\n x = self.conv1(x)\n return x\n\n\nconditional_model = FallbackIf().eval().cuda()\nconditional_script_model = torch.jit.script(conditional_model)\ntorch.jit.save(conditional_script_model, \"conditional_scripted.jit.pt\")\n" ]
[ [ "torch.nn.functional.adaptive_avg_pool2d", "torch.ones", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.jit.save", "torch.nn.LogSigmoid", "torch.jit.trace", "torch.jit.script", "torch.hub.load" ] ]
horacioMartinez/L2RPN
[ "11e13a60a568b8ad7f9c6de060ac8c41784ced96" ]
[ "old/src/preprocess_data.py" ]
[ "from os import listdir\nfrom os.path import isfile, join\nimport gc\nimport copy\nimport pickle\nimport math\nimport sys\nimport random\nimport resource\nfrom random import randrange, seed\nfrom threading import current_thread\nimport os\nimport time\nimport numpy as np\nfrom multiprocessing import cpu_count\n\n# No scientific notation\nnp.set_printoptions(suppress=True)\nnp.set_printoptions(threshold=sys.maxsize)\n\n\ndef extract_timestep_features(timestep_data, previous_timestep_data):\n assert not timestep_data[\"done\"]\n features = timestep_data[\"features\"]\n\n processed_features = np.array([])\n\n # feature_actions_rho = features[\"actions_rho\"]\n feature_rho = features[\"rho\"]\n feature_topo_vect = features[\"topo_vect\"]\n feature_load_p = features[\"load_p\"]\n feature_load_q = features[\"load_q\"]\n feature_load_v = features[\"load_v\"]\n feature_gen_p = features[\"gen_p\"]\n feature_gen_q = features[\"gen_q\"]\n feature_gen_v = features[\"gen_v\"]\n feature_month = features[\"month\"]\n feature_day = features[\"day\"]\n feature_day_of_week = features[\"day_of_week\"]\n feature_hour_of_day = features[\"hour_of_day\"]\n feature_minute_of_hour = features[\"minute_of_hour\"]\n feature_timestep_overflow = features[\"timestep_overflow\"]\n feature_time_before_cooldown_line = features[\"time_before_cooldown_line\"]\n feature_time_before_cooldown_sub = features[\"time_before_cooldown_sub\"]\n feature_timesteps_since_last_attack = features[\"timesteps_since_last_attack\"]\n feature_time_next_maintenance = features[\"time_next_maintence\"]\n feature_under_attack = features[\"under_attack\"]\n feature_attack_duration = features[\"opponent_attack_duration\"]\n feature_timesteps_since_ongoing_attack_started = features[\"timesteps_since_ongoing_attack_started\"]\n\n if previous_timestep_data is None:\n feature_rho_delta = np.zeros_like(feature_rho)\n else:\n feature_last_timestep_rho = previous_timestep_data[\"features\"][\"rho\"]\n feature_rho_delta = feature_last_timestep_rho - feature_rho\n\n # Limit timesteps to 10 (because 11 is the max timesteps the alarm can wait to have a positive score)\n\n # feature_time_before_cooldown_line[feature_time_before_cooldown_line > 10] = 11\n # feature_time_before_cooldown_sub[feature_time_before_cooldown_sub > 10] = 11\n # feature_time_next_maintenance[feature_time_next_maintenance > 10] = 99\n processed_features = (\n np.concatenate(\n (\n [feature_under_attack],\n [feature_attack_duration],\n [feature_timesteps_since_last_attack],\n [feature_timesteps_since_ongoing_attack_started],\n feature_topo_vect,\n feature_load_p,\n feature_load_q,\n feature_load_v,\n feature_gen_p,\n feature_gen_q,\n feature_gen_v,\n feature_rho,\n feature_rho_delta,\n feature_timestep_overflow,\n feature_time_before_cooldown_line,\n feature_time_before_cooldown_sub,\n feature_time_next_maintenance,\n [feature_month],\n [feature_day],\n [feature_day_of_week],\n [feature_hour_of_day],\n [feature_minute_of_hour],\n )\n )\n .flatten()\n .astype(np.float32)\n )\n\n assert not np.isnan(processed_features).any()\n return processed_features\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\nif len(sys.argv) < 3:\n print(\"Not enough arguments. 
USAGE: <Eval/Train> <NumScenarios (-1 for all)> <Raw/Balanced>\")\n exit()\n\nassert str(sys.argv[1]) == \"Train\" or str(sys.argv[1]) == \"Eval\"\nTRAINING_DATA = str(sys.argv[1]) == \"Train\"\n\nif TRAINING_DATA:\n EPISODES_DATA_PATH = \"data/episodes_data\"\nelse:\n EPISODES_DATA_PATH = \"data/episodes_data_val\"\n\nnumber_of_scenarios = int(sys.argv[2])\n\nassert str(sys.argv[3]) == \"Raw\" or str(sys.argv[3]) == \"Balanced\"\nBALANCED = str(sys.argv[3]) == \"Balanced\"\n\nrandom.seed(0)\nnp.random.seed(0)\n\n\nepisodes_data_files = [f for f in listdir(EPISODES_DATA_PATH) if isfile(join(EPISODES_DATA_PATH, f))]\nrandom.shuffle(episodes_data_files)\n\nEPISODES_CHUNK_SIZE = 2500\nPREPROCESSES_CHUNK_SIZE = 500\nif BALANCED:\n EPISODES_CHUNK_SIZE = 250000\n PREPROCESSES_CHUNK_SIZE = 500\n\nepisode_data_file_chunks = chunks(episodes_data_files, EPISODES_CHUNK_SIZE)\n\nprocessed_scenarios = 0\nchunk_index = 0\nfor episode_data_file_chunk in episode_data_file_chunks:\n if TRAINING_DATA:\n if BALANCED:\n prefix = \"data/nn_training_data_balanced/nn_training_data-\"\n else:\n prefix = \"data/nn_training_data/nn_training_data-\"\n else:\n if BALANCED:\n prefix = \"data/nn_val_data_balanced/nn_val_data-\"\n else:\n prefix = \"data/nn_val_data/nn_val_data-\"\n save_name = prefix + str(chunk_index) + \".pkl\"\n chunk_index += 1\n if os.path.isfile(save_name):\n print(\">>> FILE ALREADY EXISTS!!:\", save_name)\n assert False\n\n processing_episode_data_file_chunks = chunks(episode_data_file_chunk, PREPROCESSES_CHUNK_SIZE)\n print(\"Processing chunk: \", chunk_index)\n input_data = []\n labels = []\n for processing_episode_data_file_chunk in processing_episode_data_file_chunks:\n episodes_data = []\n for episodes_data_file in processing_episode_data_file_chunk:\n if number_of_scenarios > 0 and processed_scenarios == number_of_scenarios:\n break\n try:\n with open(EPISODES_DATA_PATH + \"/\" + episodes_data_file, \"rb\") as f:\n try:\n episodes_data.append(pickle.load(f))\n except EOFError:\n print(\"error on file\", episodes_data_file)\n continue\n except EOFError:\n print(\"error on file\", episodes_data_file)\n continue\n processed_scenarios += 1\n\n number_of_alarm_failures_due_to_action_leading_to_game_over = 0\n number_of_alarm_failures_due_to_no_info_in_previous_timesteps = 0\n # Remove scenarios where we win\n filtered_array = []\n for i in range(0, len(episodes_data)):\n last_timestep_data = episodes_data[i][len(episodes_data[i]) - 1]\n assert last_timestep_data[\"done\"]\n we_won = last_timestep_data[\"we_won\"]\n if we_won:\n continue\n filtered_array.append(episodes_data[i])\n print(\"Filtered out\", len(episodes_data) - len(filtered_array), \"because we won\")\n episodes_data = filtered_array\n\n for i in range(0, len(episodes_data)):\n timesteps_data = episodes_data[i]\n last_timestep = timesteps_data[len(timesteps_data) - 1][\"timestep\"]\n\n if BALANCED:\n non_true_indexes = np.arange(len(timesteps_data) - 12)\n np.random.shuffle(non_true_indexes)\n non_true_indexes = non_true_indexes[:12]\n true_indexes = np.arange(len(timesteps_data) - 12, len(timesteps_data))\n balanced_indexes = [*non_true_indexes, *true_indexes]\n for j in range(0, len(timesteps_data)):\n if BALANCED:\n if j not in balanced_indexes:\n continue\n timestep_data = timesteps_data[j]\n if timestep_data[\"done\"]:\n continue\n previous_timestep_data = None\n if j > 0:\n previous_timestep_data = timesteps_data[j - 1]\n current_timestep = timestep_data[\"timestep\"]\n does_finish_in_11_or_less_timesteps = (last_timestep - 
current_timestep) <= 11\n input = extract_timestep_features(timestep_data, previous_timestep_data)\n input_data.append(input)\n labels.append(does_finish_in_11_or_less_timesteps)\n print(\"chunk_index: \", chunk_index)\n print(\"processed_scenarios:\", processed_scenarios)\n print(\"Processed episode\", i, \"out of\", len(episodes_data) - 1)\n # del episodes_data\n # gc.collect()\n\n input_data = np.array(input_data)\n labels = np.array(labels)\n\n indices = np.arange(len(input_data))\n\n # Set seed as sum of first input data so we can get reproducibility and different rand in different chunks\n # seed = int(np.sum(input_data[0]) * 1000)\n # np.random.seed(seed)\n np.random.shuffle(indices)\n\n shuffled_input_data = input_data[indices]\n shuffled_labels = labels[indices]\n\n data = {\"input_data\": shuffled_input_data, \"labels\": shuffled_labels}\n\n print(\"Saving NN training data to file:\", save_name)\n with open(save_name, \"wb\") as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n del input_data\n del shuffled_input_data\n del labels\n del shuffled_labels\n del data\n gc.collect()\n\nprint(\"FINISH!\")\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros_like", "numpy.isnan", "numpy.random.seed", "numpy.set_printoptions", "numpy.random.shuffle" ] ]
pysg/pyther
[ "6a47fc41533cc50bc64134e42ddd3ed8d54d75c7" ]
[ "constants.py" ]
[ "import numpy as np\n\n# Constante universal de los gases R [=] definir las unidades #####\nRGAS = 0.08314472\n# Definir el significado fisicoquímico\n\nA0, B0, C0 = 0.0017, 1.9681, -2.7238\n# Definir el significado fisicoquímico\n\nA1, B1, C1 = -2.4407, 7.4513, 12.504\n# Definir el significado fisicoquímico\n\nD = np.array([0.428363, 18.496215, 0.338426, 0.660, 789.723105, 2.512392])\n\n# definir la correlación de los valores para los paramteros:\n# A0, B0, C0 y A1, B1, C1 y D\n" ]
[ [ "numpy.array" ] ]
oshaikh13/audio-reactive-led-strip
[ "f486f705989acc5e2b8f2d89f99e85b306908b69" ]
[ "python/led.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport platform\nimport numpy as np\nimport config\nimport time\n\nfrom pySerialTransfer import pySerialTransfer as txfer\nlink = txfer.SerialTransfer('COM3')\nlink.open()\ntime.sleep(2)\n\n_gamma = np.load(config.GAMMA_TABLE_PATH)\n\"\"\"Gamma lookup table used for nonlinear brightness correction\"\"\"\n\n_prev_pixels = np.tile(253, (3, config.N_PIXELS))\n\"\"\"Pixel values that were most recently displayed on the LED strip\"\"\"\n\npixels = np.tile(1, (3, config.N_PIXELS))\n\"\"\"Pixel values for the LED strip\"\"\"\n\n\ndef _update_esp8266():\n \"\"\"Sends UDP packets to ESP8266 to update LED strip values\n\n The ESP8266 will receive and decode the packets to determine what values\n to display on the LED strip. The communication protocol supports LED strips\n with a maximum of 256 LEDs.\n\n The packet encoding scheme is:\n |i|r|g|b|\n where\n i (0 to 255): Index of LED to change (zero-based)\n r (0 to 255): Red value of LED\n g (0 to 255): Green value of LED\n b (0 to 255): Blue value of LED\n \"\"\"\n global pixels, _prev_pixels\n # Truncate values and cast to integer\n\n # print(pixels.shape)\n pixels = np.clip(pixels, 0, 255).astype(int)\n # Optionally apply gamma correc tio\n p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)\n\n m = [0]\n \n for i in range(p.shape[1]):\n \n m.append(p[0][i]) # Pixel red value\n m.append(p[1][i]) # Pixel green value\n m.append(p[2][i]) # Pixel blue value\n if i % 30 == 0:\n # print(m)\n # time.sleep(0.01)\n link.txBuff = m.copy()\n link.send(len(link.txBuff))\n m = [i // 30]\n \n _prev_pixels = np.copy(p)\n\n\ndef update():\n \"\"\"Updates the LED strip values\"\"\"\n _update_esp8266()\n\n\n# Execute this file to run a LED strand test\n# If everything is working, you should see a red, green, and blue pixel scroll\n# across the LED strip continously\nif __name__ == '__main__':\n import time\n # Turn all pixels off\n pixels *= 0\n pixels[0, 0] = 255 # Set 1st pixel red\n pixels[1, 1] = 255 # Set 2nd pixel green\n pixels[2, 2] = 255 # Set 3rd pixel blue\n print('Starting LED strand test')\n\n while True:\n pixels = np.roll(pixels, 1, axis=1)\n update()\n" ]
[ [ "numpy.copy", "numpy.load", "numpy.tile", "numpy.roll", "numpy.clip" ] ]
epgreig/robo-advisor
[ "f4429ddf5f3917330c2675fcb807de5c894db9b4" ]
[ "instrument.py" ]
[ "\nfrom environment import Environment\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.optimize import minimize\nfrom datetime import timedelta\nfrom pandas._libs.tslibs.timestamps import Timestamp, Timedelta\n\n\nclass Instrument:\n def __init__(self):\n self.ccy = None\n self.type = None\n self.name = None\n\n def value(self, env: Environment):\n pass\n\n\nclass Cash(Instrument):\n def __init__(self, ccy):\n super().__init__()\n self.ccy = ccy\n self.type = 'Cash'\n\n def value(self, *args, **kwargs):\n return 1\n\n\nclass Equity(Instrument):\n def __init__(self, name, ccy):\n super().__init__()\n self.name = name\n self.ccy = ccy\n self.type = 'EQ'\n\n def value(self, env: Environment):\n return env.prices[self.name] # price quoted in native ccy\n\n def div_yield(self, env: Environment):\n \"\"\"\n :param env:\n :return: a single (continuously paid) index div yield\n \"\"\"\n return env.divs[self.name]\n\n\nclass Bond(Instrument):\n def __init__(self, name, ccy, par, T, coup, freq=2):\n super().__init__()\n self.name = name\n self.ccy = ccy\n self.type = 'FI'\n self.par = par\n self.T = T\n self.coup = coup\n self.coupon = coup * par / (freq * 100.)\n self.freq = float(freq)\n self.periods = T * float(freq)\n self.dt = [(i+1)/freq for i in range(int(self.periods))]\n\n def value(self, env: Environment):\n ytm = env.curves[self.ccy].get_rate(self.T)\n disc = 1/(1+ytm/self.freq)\n pv_coupons = sum([self.coupon * (disc ** (self.freq * t)) for t in dt])\n pv_face = self.par / (disc ** self.periods)\n return pv_coupons + pv_face\n\n\nclass Option(Instrument):\n def __init__(self, name, ccy, is_call, ul, K, T):\n \"\"\"\n :param name:\n :param ccy: currency\n :param is_call: call indicator\n :param ul: underlying\n :param K: strike\n :param T: datetime object, maturity date (NOT time to maturity!)\n \"\"\"\n super().__init__()\n self.name = name\n self.ccy = ccy\n self.is_call = is_call\n self.ul = ul\n self.K = K\n self.T = T\n self.type = 'Option'\n\n def value(self, env: Environment, ba_spread=0):\n\n if self.T < env.date:\n raise ValueError(\"Environment date is after option maturity\")\n ttm = (self.T - env.date).days / 365\n\n S = env.prices[self.ul]\n moneyness = S / self.K\n\n if abs(ttm-1/12) < 1/36:\n tenor = 1\n elif abs(ttm-2/12) < 1/36:\n tenor = 2\n elif abs(ttm) < 1/36:\n if self.is_call:\n return max(S - self.K, 0)\n else:\n return max(self.K - S, 0)\n else:\n raise ValueError(\"Time to maturity is not 1 or 2 months\")\n vol = env.surfaces[self.ul].get_iv(tenor, moneyness) + ba_spread\n int_rate = env.curves[self.ccy].get_rate(tenor)\n div_yield = env.divs[self.ul]\n\n return Option.bs_price(S, self.K, ttm, vol, int_rate, div_yield, self.is_call)\n\n def get_greeks(self, env: Environment):\n if self.T < env.date:\n raise ValueError(\"Environment date is after option maturity\")\n ttm = (self.T - env.date).days / 365\n\n S = env.prices[self.ul]\n moneyness = S / self.K\n\n if abs(ttm-1/12) < 1/36:\n tenor = 1\n elif abs(ttm-2/12) < 1/36:\n tenor = 2\n else:\n raise ValueError(\"Time to maturity is not 1 or 2 months\")\n\n vol = env.surfaces[self.ul].get_iv(tenor, moneyness)\n int_rate = env.curves[self.ccy].get_rate(tenor)\n div_yield = env.divs[self.ul]\n\n delta = Option.bs_delta(S, self.K, ttm, vol, int_rate, div_yield, self.is_call)\n gamma = Option.bs_gamma(S, self.K, ttm, vol, int_rate, div_yield)\n vega = Option.bs_vega(S, self.K, ttm, vol, int_rate, div_yield)\n theta = Option.bs_theta(S, self.K, ttm, vol, int_rate, div_yield, self.is_call)\n rho = 
Option.bs_rho(S, self.K, ttm, vol, int_rate, div_yield)\n greeks = {\"delta\": delta, \"gamma\": gamma, \"vega\": vega, \"theta\": theta, 'rho': rho}\n return greeks\n\n @staticmethod\n def bs_price(S, K, ttm, vol, int_rate, div_yield, is_call):\n\n F = S * np.exp((int_rate - div_yield) * ttm)\n d1 = (np.log(S/K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n d2 = d1 - vol * np.sqrt(ttm)\n if is_call:\n return np.exp(-int_rate * ttm) * (F * norm.cdf(d1) - K * norm.cdf(d2))\n else:\n return np.exp(-int_rate * ttm) * (-F * norm.cdf(-d1) + K * norm.cdf(-d2))\n\n @staticmethod\n def bs_delta(S, K, ttm, vol, int_rate, div_yield, is_call):\n d1 = (np.log(S/K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n if is_call:\n return np.exp(-div_yield*ttm) * norm.cdf(d1)\n else:\n return -np.exp(-div_yield*ttm) * norm.cdf(-d1)\n\n @staticmethod\n def bs_gamma(S, K, ttm, vol, int_rate, div_yield):\n d1 = (np.log(S / K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n gamma = np.exp(-div_yield * ttm) * norm.pdf(d1) / (S * vol * np.sqrt(ttm))\n return gamma\n\n @staticmethod\n def bs_vega(S, K, ttm, vol, int_rate, div_yield):\n d1 = (np.log(S / K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n vega = S * np.exp(-div_yield * ttm) * norm.pdf(d1) * np.sqrt(ttm)\n return vega\n\n @staticmethod\n def bs_theta(S, K, ttm, vol, int_rate, div_yield, is_call):\n d1 = (np.log(S/K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n d2 = d1 - vol * np.sqrt(ttm)\n if is_call:\n term1 = -np.exp(-div_yield * ttm)*S*norm.pdf(d1)*vol/(2*np.sqrt(ttm))\n term2 = -int_rate*K*np.exp(-int_rate * ttm)*norm.cdf(d2)\n term3 = div_yield*S*np.exp(-div_yield * ttm)*norm.cdf(d1)\n return term1+term2+term3\n else:\n term1 = -np.exp(-div_yield * ttm) * S * norm.pdf(d1) * vol / (2 * np.sqrt(ttm))\n term2 = int_rate * K * np.exp(-int_rate * ttm) * norm.cdf(-d2)\n term3 = -div_yield * S * np.exp(-div_yield * ttm) * norm.cdf(-d1)\n return term1 + term2 + term3\n\n @staticmethod\n def bs_rho(S, K, ttm, vol, int_rate, div_yield):\n d1 = (np.log(S / K) + (int_rate - div_yield + 0.5 * vol ** 2) * ttm) / (vol * np.sqrt(ttm))\n d2 = d1 - vol * np.sqrt(ttm)\n rho = K * ttm * np.exp(-int_rate * ttm) * norm.cdf(d2)\n return rho\n\n @staticmethod\n def bs_impvol(S, K, ttm, mkt_price, int_rate, div_yield, is_call, n_iters=5000, tol=1e-2):\n guess = 0.2\n price = mkt_price * (1+10*tol) # just to enter loop\n n = 0\n while abs(price-mkt_price)/mkt_price > tol:\n price = Option.bs_price(S, K, ttm, guess, int_rate, div_yield, is_call)\n vega = Option.bs_vega(S, K, ttm, guess, int_rate, div_yield)\n guess = guess - (price-mkt_price)/vega\n if guess > 10:\n guess = 10\n elif abs(guess) < 1e-6:\n guess = 0.5\n n += 1\n if n > n_iters:\n print(\"Impvol rootfinding failed\")\n return guess\n\n return guess\n" ]
[ [ "scipy.stats.norm.pdf", "numpy.log", "numpy.exp", "numpy.sqrt", "scipy.stats.norm.cdf" ] ]
parkermac/LO
[ "09e0197de7f2166bfa835ec62018b7a8fbfa7379" ]
[ "extract/tef/extract_segment_one_time.py" ]
[ "\"\"\"\nThis code extracts all needed segment data for one history file,\nlooping over all variables and all segments.\n\nTo test on mac:\nrun extract_segment_one_time.py -pth /Users/pm8/Documents/LiveOcean_roms/output -out_dir /Users/pm8/Documents/LO_output/extract/cas6_v3_lo8b/segment_temp_2019.07.04_2019.07.04 -gtagex cas6_v3_lo8b -d 2019.07.04 -nhis 3 -get_bio True -test True\n\"\"\"\nfrom pathlib import Path\nimport sys\nimport argparse\nimport numpy as np\nimport xarray as xr\nfrom time import time\nimport pickle\nimport pandas as pd\n\nfrom lo_tools import Lfun, zfun, zrfun\nimport tef_fun\n\n# command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-pth', type=str) # path to LO_roms\nparser.add_argument('-out_dir', type=str) # path to temporary directory for output\nparser.add_argument('-gtagex', type=str) # gtagex\nparser.add_argument('-d', type=str) # date string like 2019.07.04\nparser.add_argument('-nhis', type=int, default=1) # history file number, 1-25\nparser.add_argument('-get_bio', type=Lfun.boolean_string, default=True)\nparser.add_argument('-testing', type=Lfun.boolean_string, default=False)\nargs = parser.parse_args()\n\nLdir = Lfun.Lstart(gridname=args.gtagex.split('_')[0])\n\nnhiss = ('0000' + str(args.nhis))[-4:]\nfn = Path(args.pth) / args.gtagex / ('f' + args.d) / ('ocean_his_' + nhiss + '.nc')\nout_dir = Path(args.out_dir)\nLfun.make_dir(out_dir)\nout_fn = out_dir / ('A_' + args.d + '_' + nhiss + '.p')\nout_fn.unlink(missing_ok=True)\n\n# ---\n# get grid info\nG, S, T = zrfun.get_basic_info(fn)\nh = G['h']\nDA = G['DX'] * G['DY']\nDA3 = DA.reshape((1,G['M'],G['L']))\n\n# get segment info\nvol_dir = Ldir['LOo'] / 'extract' / 'tef' / ('volumes_' + Ldir['gridname'])\nv_df = pd.read_pickle(vol_dir / 'volumes.p')\nj_dict = pickle.load(open(vol_dir / 'j_dict.p', 'rb'))\ni_dict = pickle.load(open(vol_dir / 'i_dict.p', 'rb'))\nseg_list = list(v_df.index)\n\n# set list of variables to extract\nif args.get_bio:\n vn_list = tef_fun.vn_list\nelse:\n vn_list = ['salt']\n\ntt0 = time()\nprint(fn)\n \nds = xr.open_dataset(fn)\n\nvn_dict = {}\nfor vn in vn_list:\n vn_dict[vn] = ds[vn][0,:,:,:].values\nzeta = ds['zeta'][0,:,:].values\nds.close()\n\n# find the volume and other variables for each segment, at this time\nA = pd.DataFrame(index=seg_list)\nAA = dict()\nfor seg_name in seg_list:\n \n jjj = j_dict[seg_name]\n iii = i_dict[seg_name]\n z_w = zrfun.get_z(h[jjj,iii], zeta[jjj,iii], S, only_w=True)\n dz = np.diff(z_w, axis=0)\n DV = dz * DA3[0,jjj,iii]\n volume = DV.sum()\n AA['volume'] = volume\n \n for vn in vn_list:\n AA[vn] = (vn_dict[vn][:,jjj,iii] * DV).sum()/volume\n # store results\n for vn in vn_list + ['volume']:\n A.loc[seg_name, vn] = AA[vn]\n \nprint(' ** took %0.1f sec' % (time()-tt0))\nsys.stdout.flush()\n\nA.to_pickle(out_fn)\nprint('Time to extract all segment data = %0.2f sec' % (time()-tt0))\nsys.stdout.flush()\n\n" ]
[ [ "pandas.read_pickle", "pandas.DataFrame", "numpy.diff" ] ]
ddutt/modin
[ "11581d552493c7caf42b25d8af2408b75c008a10" ]
[ "modin/pandas/groupby.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pandas\nimport pandas.core.groupby\nfrom pandas.core.dtypes.common import is_list_like\nimport pandas.core.common as com\nimport sys\n\nfrom modin.error_message import ErrorMessage\nfrom .utils import _inherit_docstrings\n\n\n@_inherit_docstrings(\n pandas.core.groupby.DataFrameGroupBy,\n excluded=[\n pandas.core.groupby.DataFrameGroupBy,\n pandas.core.groupby.DataFrameGroupBy.__init__,\n ],\n)\nclass DataFrameGroupBy(object):\n def __init__(\n self,\n df,\n by,\n axis,\n level,\n as_index,\n sort,\n group_keys,\n squeeze,\n idx_name,\n **kwargs\n ):\n self._axis = axis\n self._idx_name = idx_name\n self._df = df\n self._query_compiler = self._df._query_compiler\n self._index = self._query_compiler.index\n self._columns = self._query_compiler.columns\n self._by = by\n if level is None:\n # This tells us whether or not there are multiple columns/rows in the groupby\n self._is_multi_by = all(obj in self._df for obj in self._by) and axis == 0\n else:\n self._is_multi_by = False\n self._level = level\n self._kwargs = {\n \"level\": level,\n \"sort\": sort,\n \"as_index\": as_index,\n \"group_keys\": group_keys,\n \"squeeze\": squeeze,\n }\n self._kwargs.update(kwargs)\n\n @property\n def _sort(self):\n return self._kwargs.get(\"sort\")\n\n @property\n def _as_index(self):\n return self._kwargs.get(\"as_index\")\n\n def __getattr__(self, key):\n \"\"\"Afer regular attribute access, looks up the name in the columns\n\n Args:\n key (str): Attribute name.\n\n Returns:\n The value of the attribute.\n \"\"\"\n try:\n return object.__getattribute__(self, key)\n except AttributeError as e:\n if key in self._columns:\n return self._default_to_pandas(lambda df: df.__getitem__(key))\n raise e\n\n _index_grouped_cache = None\n\n @property\n def _index_grouped(self):\n if self._index_grouped_cache is None:\n if self._is_multi_by:\n # Because we are doing a collect (to_pandas) here and then groupby, we\n # end up using pandas implementation. 
Add the warning so the user is\n # aware.\n ErrorMessage.catch_bugs_and_request_email(self._axis == 1)\n ErrorMessage.default_to_pandas(\"Groupby with multiple columns\")\n self._index_grouped_cache = {\n k: v.index\n for k, v in self._df._query_compiler.getitem_column_array(self._by)\n .to_pandas()\n .groupby(by=self._by)\n }\n else:\n if self._axis == 0:\n self._index_grouped_cache = self._index.groupby(self._by)\n else:\n self._index_grouped_cache = self._columns.groupby(self._by)\n return self._index_grouped_cache\n\n @property\n def _iter(self):\n from .dataframe import DataFrame\n\n if sys.version_info[0] == 2:\n group_ids = self._index_grouped.iterkeys()\n elif sys.version_info[0] == 3:\n group_ids = self._index_grouped.keys()\n if self._axis == 0:\n return (\n (\n k,\n DataFrame(\n query_compiler=self._query_compiler.getitem_row_array(\n self._index.get_indexer_for(self._index_grouped[k].unique())\n )\n ),\n )\n for k in (sorted(group_ids) if self._sort else group_ids)\n )\n else:\n return (\n (\n k,\n DataFrame(\n query_compiler=self._query_compiler.getitem_column_array(\n self._index_grouped[k].unique()\n )\n ),\n )\n for k in (sorted(group_ids) if self._sort else group_ids)\n )\n\n @property\n def ngroups(self):\n return len(self)\n\n def skew(self, **kwargs):\n return self._apply_agg_function(lambda df: df.skew(**kwargs))\n\n def ffill(self, limit=None):\n return self._default_to_pandas(lambda df: df.ffill(limit=limit))\n\n def sem(self, ddof=1):\n return self._default_to_pandas(lambda df: df.sem(ddof=ddof))\n\n def mean(self, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.mean(*args, **kwargs))\n\n def any(self):\n return self._apply_agg_function(lambda df: df.any())\n\n @property\n def plot(self): # pragma: no cover\n return self._default_to_pandas(lambda df: df.plot)\n\n def ohlc(self):\n return self._default_to_pandas(lambda df: df.ohlc())\n\n def __bytes__(self):\n return self._default_to_pandas(lambda df: df.__bytes__())\n\n @property\n def tshift(self):\n return self._default_to_pandas(lambda df: df.tshift)\n\n @property\n def groups(self):\n return self._index_grouped\n\n def min(self, **kwargs):\n return self._apply_agg_function(lambda df: df.min(**kwargs))\n\n def idxmax(self):\n return self._default_to_pandas(lambda df: df.idxmax())\n\n @property\n def ndim(self):\n return 2 # ndim is always 2 for DataFrames\n\n def shift(self, periods=1, freq=None, axis=0):\n return self._default_to_pandas(\n lambda df: df.shift(periods=periods, freq=freq, axis=axis)\n )\n\n def nth(self, n, dropna=None):\n return self._default_to_pandas(lambda df: df.nth(n, dropna=dropna))\n\n def cumsum(self, axis=0, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.cumsum(axis, *args, **kwargs))\n\n @property\n def indices(self):\n return self._index_grouped\n\n def pct_change(self):\n return self._default_to_pandas(lambda df: df.pct_change())\n\n def filter(self, func, dropna=True, *args, **kwargs):\n return self._default_to_pandas(\n lambda df: df.filter(func, dropna=dropna, *args, **kwargs)\n )\n\n def cummax(self, axis=0, **kwargs):\n return self._apply_agg_function(lambda df: df.cummax(axis, **kwargs))\n\n def apply(self, func, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.apply(func, *args, **kwargs))\n\n @property\n def dtypes(self):\n if self._axis == 1:\n raise ValueError(\"Cannot call dtypes on groupby with axis=1\")\n return self._apply_agg_function(lambda df: df.dtypes)\n\n def first(self, **kwargs):\n return 
self._default_to_pandas(lambda df: df.first(**kwargs))\n\n def backfill(self, limit=None):\n return self.bfill(limit)\n\n def __getitem__(self, key):\n # This operation requires a SeriesGroupBy Object\n return self._default_to_pandas(lambda df: df.__getitem__(key))\n\n def cummin(self, axis=0, **kwargs):\n return self._apply_agg_function(lambda df: df.cummin(axis=axis, **kwargs))\n\n def bfill(self, limit=None):\n return self._default_to_pandas(lambda df: df.bfill(limit=limit))\n\n def idxmin(self):\n return self._default_to_pandas(lambda df: df.idxmin())\n\n def prod(self, **kwargs):\n return self._apply_agg_function(lambda df: df.prod(**kwargs))\n\n def std(self, ddof=1, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.std(ddof, *args, **kwargs))\n\n def aggregate(self, arg, *args, **kwargs):\n if self._axis != 0:\n # This is not implemented in pandas,\n # so we throw a different message\n raise NotImplementedError(\"axis other than 0 is not supported\")\n\n if is_list_like(arg):\n return self._default_to_pandas(\n lambda df: df.aggregate(arg, *args, **kwargs)\n )\n return self._apply_agg_function(lambda df: df.aggregate(arg, *args, **kwargs))\n\n def last(self, **kwargs):\n return self._default_to_pandas(lambda df: df.last(**kwargs))\n\n def mad(self):\n return self._default_to_pandas(lambda df: df.mad())\n\n def rank(self, **kwargs):\n return self._apply_agg_function(lambda df: df.rank(**kwargs))\n\n @property\n def corrwith(self):\n return self._default_to_pandas(lambda df: df.corrwith)\n\n def pad(self, limit=None):\n return self._default_to_pandas(lambda df: df.pad(limit=limit))\n\n def max(self, **kwargs):\n return self._apply_agg_function(lambda df: df.max(**kwargs))\n\n def var(self, ddof=1, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.var(ddof, *args, **kwargs))\n\n def get_group(self, name, obj=None):\n return self._default_to_pandas(lambda df: df.get_group(name, obj=obj))\n\n def __len__(self):\n return len(self._index_grouped)\n\n def all(self, **kwargs):\n return self._apply_agg_function(lambda df: df.all(**kwargs))\n\n def size(self):\n return pandas.Series({k: len(v) for k, v in self._index_grouped.items()})\n\n def sum(self, **kwargs):\n return self._apply_agg_function(lambda df: df.sum(**kwargs))\n\n def __unicode__(self):\n return self._default_to_pandas(lambda df: df.__unicode__())\n\n def describe(self, **kwargs):\n return self._default_to_pandas(lambda df: df.describe(**kwargs))\n\n def boxplot(\n self,\n grouped,\n subplots=True,\n column=None,\n fontsize=None,\n rot=0,\n grid=True,\n ax=None,\n figsize=None,\n layout=None,\n **kwargs\n ):\n return self._default_to_pandas(\n lambda df: df.boxplot(\n grouped,\n subplots=subplots,\n column=column,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n ax=ax,\n figsize=figsize,\n layout=layout,\n **kwargs\n )\n )\n\n def ngroup(self, ascending=True):\n index = self._index if not self._axis else self._columns\n return (\n pandas.Series(index=index)\n .groupby(by=self._by, **self._kwargs)\n .ngroup(ascending)\n )\n\n def nunique(self, dropna=True):\n return self._apply_agg_function(lambda df: df.nunique(dropna), drop=False)\n\n def resample(self, rule, *args, **kwargs):\n return self._default_to_pandas(lambda df: df.resample(rule, *args, **kwargs))\n\n def median(self, **kwargs):\n return self._apply_agg_function(lambda df: df.median(**kwargs))\n\n def head(self, n=5):\n return self._default_to_pandas(lambda df: df.head(n))\n\n def cumprod(self, axis=0, *args, **kwargs):\n return 
self._apply_agg_function(lambda df: df.cumprod(axis, *args, **kwargs))\n\n def __iter__(self):\n return self._iter.__iter__()\n\n def agg(self, arg, *args, **kwargs):\n return self.aggregate(arg, *args, **kwargs)\n\n def cov(self):\n return self._default_to_pandas(lambda df: df.cov())\n\n def transform(self, func, *args, **kwargs):\n return self._apply_agg_function(lambda df: df.transform(func, *args, **kwargs))\n\n def corr(self, **kwargs):\n return self._default_to_pandas(lambda df: df.corr(**kwargs))\n\n def fillna(self, **kwargs):\n return self._apply_agg_function(lambda df: df.fillna(**kwargs))\n\n def count(self, **kwargs):\n return self._apply_agg_function(lambda df: df.count(**kwargs))\n\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n def cumcount(self, ascending=True):\n return self._default_to_pandas(lambda df: df.cumcount(ascending=ascending))\n\n def tail(self, n=5):\n return self._default_to_pandas(lambda df: df.tail(n))\n\n # expanding and rolling are unique cases and need to likely be handled\n # separately. They do not appear to be commonly used.\n def expanding(self, *args, **kwargs):\n return self._default_to_pandas(lambda df: df.expanding(*args, **kwargs))\n\n def rolling(self, *args, **kwargs):\n return self._default_to_pandas(lambda df: df.rolling(*args, **kwargs))\n\n def hist(self):\n return self._default_to_pandas(lambda df: df.hist())\n\n def quantile(self, q=0.5, **kwargs):\n if is_list_like(q):\n return self._default_to_pandas(lambda df: df.quantile(q=q, **kwargs))\n\n return self._apply_agg_function(lambda df: df.quantile(q, **kwargs))\n\n def diff(self):\n return self._default_to_pandas(lambda df: df.diff())\n\n def take(self, **kwargs):\n return self._default_to_pandas(lambda df: df.take(**kwargs))\n\n def _apply_agg_function(self, f, drop=True, **kwargs):\n \"\"\"Perform aggregation and combine stages based on a given function.\n\n Args:\n f: The function to apply to each group.\n\n Returns:\n A new combined DataFrame with the result of all groups.\n \"\"\"\n assert callable(f), \"'{0}' object is not callable\".format(type(f))\n from .dataframe import DataFrame\n\n if self._is_multi_by or self._level is not None:\n return self._default_to_pandas(f, **kwargs)\n # For aggregations, pandas behavior does this for the result.\n # For other operations it does not, so we wait until there is an aggregation to\n # actually perform this operation.\n if self._idx_name is not None and drop:\n groupby_qc = self._query_compiler.drop(columns=[self._idx_name])\n else:\n groupby_qc = self._query_compiler\n new_manager = groupby_qc.groupby_agg(\n self._by, self._axis, f, self._kwargs, kwargs\n )\n if self._idx_name is not None and self._as_index:\n new_manager.index.name = self._idx_name\n return DataFrame(query_compiler=new_manager)\n\n def _default_to_pandas(self, f, **kwargs):\n \"\"\"Defailts the execution of this function to pandas.\n\n Args:\n f: The function to apply to each group.\n\n Returns:\n A new Modin DataFrame with the result of the pandas function.\n \"\"\"\n return self._df._default_to_pandas(\n lambda df: f(df.groupby(by=self._by, axis=self._axis, **self._kwargs)),\n **kwargs\n )\n" ]
[ [ "pandas.core.dtypes.common.is_list_like", "pandas.core.common._pipe", "pandas.Series" ] ]
sveta-egorova/dlcourse_ai
[ "53c216e6116fd5a55fe5f99450451a784addbc80" ]
[ "assignments/assignment2/model.py" ]
[ "import numpy as np\n\nfrom layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization\n\n\nclass TwoLayerNet:\n \"\"\" Neural network with two fully connected layers \"\"\"\n\n def __init__(self, n_input, n_output, hidden_layer_size, reg):\n \"\"\"\n Initializes the neural network\n\n Arguments:\n n_input, int - dimension of the model input\n n_output, int - number of classes to predict\n hidden_layer_size, int - number of neurons in the hidden layer\n reg, float - L2 regularization strength\n \"\"\"\n self.reg = reg\n self.fc1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu = ReLULayer()\n self.fc2 = FullyConnectedLayer(hidden_layer_size, n_output)\n # TODO Create necessary layers\n# raise Exception(\"Not implemented!\")\n\n def compute_loss_and_gradients(self, X, y):\n \"\"\"\n Computes total loss and updates parameter gradients\n on a batch of training examples\n\n Arguments:\n X, np array (batch_size, input_features) - input data\n y, np array of int (batch_size) - classes\n \"\"\"\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n for _, param in self.params().items():\n param.grad = np.zeros(param.grad.shape)\n # Hint: using self.params() might be useful!\n# raise Exception(\"Not implemented!\")\n \n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n results = self.fc1.forward(X)\n results = self.relu.forward(results)\n results = self.fc2.forward(results)\n loss, d_predictions = softmax_with_cross_entropy(results, y)\n# print(\"loss before regularization =\", loss)\n# grad_reg = 0\n# print(\"Total grad = \", param.grad)\n# print(\"Reg loss for all parameters =\", loss_reg)\n d_input_fc2 = self.fc2.backward(d_predictions)\n d_input_relu = self.relu.backward(d_input_fc2)\n d_input_fc1 = self.fc1.backward(d_input_relu)\n \n loss_reg = 0\n\n for param_name, param in self.params().items():\n loss_param, grad = l2_regularization(param.value, self.reg)\n# print(f\"reg_loss for {param_name} = \", loss_param)\n loss_reg += loss_param\n param.grad += grad\n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n# raise Exception(\"Not implemented!\")\n\n return loss + loss_reg\n\n def predict(self, X):\n \"\"\"\n Produces classifier predictions on the set\n\n Arguments:\n X, np array (test_samples, num_features)\n\n Returns:\n y_pred, np.array of int (test_samples)\n \"\"\"\n # TODO: Implement predict\n # Hint: some of the code of the compute_loss_and_gradients\n # can be reused\n pred = np.zeros(X.shape[0], np.int)\n results = self.fc1.forward(X)\n results = self.relu.forward(results)\n results = self.fc2.forward(results)\n \n probs = results - np.max(results, axis=1, keepdims=True)\n probs = np.exp(probs) \n probs /= np.sum(probs, axis=1, keepdims=True)\n\n pred = np.argmax(probs, axis=1)\n\n# raise Exception(\"Not implemented!\")\n return pred\n\n def params(self):\n result = {'W1': self.fc1.W, 'B1': self.fc1.B,\n 'W2': self.fc2.W, 'B2': self.fc2.B}\n\n # TODO Implement aggregating all of the params\n\n# raise Exception(\"Not implemented!\")\n\n return result\n" ]
[ [ "numpy.max", "numpy.zeros", "numpy.sum", "numpy.exp", "numpy.argmax" ] ]
CaptainSharf/chapel
[ "0659db4128ee8e324f2a987d4f48666961db513b" ]
[ "third-party/llvm/llvm-src/utils/llvm-locstats/llvm-locstats.py" ]
[ "#!/usr/bin/env python\n#\n# This is a tool that works like debug location coverage calculator.\n# It parses the llvm-dwarfdump --statistics output by reporting it\n# in a more human readable way.\n#\n\nfrom __future__ import print_function\nimport argparse\nimport os\nimport sys\nfrom json import loads\nfrom math import ceil\nfrom collections import OrderedDict\nfrom subprocess import Popen, PIPE\n\n# Holds the debug location statistics.\nclass LocationStats:\n def __init__(self, file_name, variables_total, variables_total_locstats,\n variables_with_loc, variables_scope_bytes_covered, variables_scope_bytes,\n variables_coverage_map):\n self.file_name = file_name\n self.variables_total = variables_total\n self.variables_total_locstats = variables_total_locstats\n self.variables_with_loc = variables_with_loc\n self.scope_bytes_covered = variables_scope_bytes_covered\n self.scope_bytes = variables_scope_bytes\n self.variables_coverage_map = variables_coverage_map\n\n # Get the PC ranges coverage.\n def get_pc_coverage(self):\n pc_ranges_covered = int(ceil(self.scope_bytes_covered * 100.0) \\\n / self.scope_bytes)\n return pc_ranges_covered\n\n # Pretty print the debug location buckets.\n def pretty_print(self):\n if self.scope_bytes == 0:\n print ('No scope bytes found.')\n return -1\n\n pc_ranges_covered = self.get_pc_coverage()\n variables_coverage_per_map = {}\n for cov_bucket in coverage_buckets():\n variables_coverage_per_map[cov_bucket] = \\\n int(ceil(self.variables_coverage_map[cov_bucket] * 100.0) \\\n / self.variables_total_locstats)\n\n print (' =================================================')\n print (' Debug Location Statistics ')\n print (' =================================================')\n print (' cov% samples percentage(~) ')\n print (' -------------------------------------------------')\n for cov_bucket in coverage_buckets():\n print (' {0:10} {1:8d} {2:3d}%'. 
\\\n format(cov_bucket, self.variables_coverage_map[cov_bucket], \\\n variables_coverage_per_map[cov_bucket]))\n print (' =================================================')\n print (' -the number of debug variables processed: ' \\\n + str(self.variables_total_locstats))\n print (' -PC ranges covered: ' + str(pc_ranges_covered) + '%')\n\n # Only if we are processing all the variables output the total\n # availability.\n if self.variables_total and self.variables_with_loc:\n total_availability = int(ceil(self.variables_with_loc * 100.0) \\\n / self.variables_total)\n print (' -------------------------------------------------')\n print (' -total availability: ' + str(total_availability) + '%')\n print (' =================================================')\n\n return 0\n\n # Draw a plot representing the location buckets.\n def draw_plot(self):\n try:\n import matplotlib\n except ImportError:\n print('error: matplotlib not found.')\n sys.exit(1)\n\n from matplotlib import pyplot as plt\n\n buckets = range(len(self.variables_coverage_map))\n plt.figure(figsize=(12, 8))\n plt.title('Debug Location Statistics', fontweight='bold')\n plt.xlabel('location buckets')\n plt.ylabel('number of variables in the location buckets')\n plt.bar(buckets, self.variables_coverage_map.values(), align='center',\n tick_label=self.variables_coverage_map.keys(),\n label='variables of {}'.format(self.file_name))\n plt.xticks(rotation=45, fontsize='x-small')\n plt.yticks()\n\n # Place the text box with the coverage info.\n pc_ranges_covered = self.get_pc_coverage()\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n plt.text(0.02, 0.90, 'PC ranges covered: {}%'.format(pc_ranges_covered),\n transform=plt.gca().transAxes, fontsize=12,\n verticalalignment='top', bbox=props)\n plt.legend()\n plt.grid(color='grey', which='major', axis='y', linestyle='-', linewidth=0.3)\n\n plt.savefig('locstats.png')\n print('The plot was saved within \"locstats.png\".')\n\n# Define the location buckets.\ndef coverage_buckets():\n yield '0%'\n yield '(0%,10%)'\n for start in range(10, 91, 10):\n yield '[{0}%,{1}%)'.format(start, start + 10)\n yield '100%'\n\n# Parse the JSON representing the debug statistics, and create a\n# LocationStats object.\ndef parse_locstats(opts, binary):\n # These will be different due to different options enabled.\n variables_total = None\n variables_total_locstats = None\n variables_with_loc = None\n variables_scope_bytes_covered = None\n variables_scope_bytes = None\n variables_scope_bytes_entry_values = None\n variables_coverage_map = OrderedDict()\n\n # Get the directory of the LLVM tools.\n llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), \\\n \"llvm-dwarfdump\")\n # The statistics llvm-dwarfdump option.\n llvm_dwarfdump_stats_opt = \"--statistics\"\n\n # Generate the stats with the llvm-dwarfdump.\n subproc = Popen([llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary], \\\n stdin=PIPE, stdout=PIPE, stderr=PIPE, \\\n universal_newlines = True)\n cmd_stdout, cmd_stderr = subproc.communicate()\n\n # Get the JSON and parse it.\n json_parsed = None\n\n try:\n json_parsed = loads(cmd_stdout)\n except:\n print ('error: No valid llvm-dwarfdump statistics found.')\n sys.exit(1)\n\n if opts.only_variables:\n # Read the JSON only for local variables.\n variables_total_locstats = \\\n json_parsed['total vars procesed by location statistics']\n variables_scope_bytes_covered = \\\n json_parsed['vars scope bytes covered']\n variables_scope_bytes = \\\n json_parsed['vars scope bytes total']\n if 
not opts.ignore_debug_entry_values:\n for cov_bucket in coverage_buckets():\n cov_category = \"vars with {} of its scope covered\".format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n else:\n variables_scope_bytes_entry_values = \\\n json_parsed['vars entry value scope bytes covered']\n variables_scope_bytes_covered = variables_scope_bytes_covered \\\n - variables_scope_bytes_entry_values\n for cov_bucket in coverage_buckets():\n cov_category = \\\n \"vars (excluding the debug entry values) \" \\\n \"with {} of its scope covered\".format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n elif opts.only_formal_parameters:\n # Read the JSON only for formal parameters.\n variables_total_locstats = \\\n json_parsed['total params procesed by location statistics']\n variables_scope_bytes_covered = \\\n json_parsed['formal params scope bytes covered']\n variables_scope_bytes = \\\n json_parsed['formal params scope bytes total']\n if not opts.ignore_debug_entry_values:\n for cov_bucket in coverage_buckets():\n cov_category = \"params with {} of its scope covered\".format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n else:\n variables_scope_bytes_entry_values = \\\n json_parsed['formal params entry value scope bytes covered']\n variables_scope_bytes_covered = variables_scope_bytes_covered \\\n - variables_scope_bytes_entry_values\n for cov_bucket in coverage_buckets():\n cov_category = \\\n \"params (excluding the debug entry values) \" \\\n \"with {} of its scope covered\".format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n else:\n # Read the JSON for both local variables and formal parameters.\n variables_total = \\\n json_parsed['source variables']\n variables_with_loc = json_parsed['variables with location']\n variables_total_locstats = \\\n json_parsed['total variables procesed by location statistics']\n variables_scope_bytes_covered = \\\n json_parsed['scope bytes covered']\n variables_scope_bytes = \\\n json_parsed['scope bytes total']\n if not opts.ignore_debug_entry_values:\n for cov_bucket in coverage_buckets():\n cov_category = \"variables with {} of its scope covered\". \\\n format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n else:\n variables_scope_bytes_entry_values = \\\n json_parsed['entry value scope bytes covered']\n variables_scope_bytes_covered = variables_scope_bytes_covered \\\n - variables_scope_bytes_entry_values\n for cov_bucket in coverage_buckets():\n cov_category = \"variables (excluding the debug entry values) \" \\\n \"with {} of its scope covered\". 
format(cov_bucket)\n variables_coverage_map[cov_bucket] = json_parsed[cov_category]\n\n return LocationStats(binary, variables_total, variables_total_locstats,\n variables_with_loc, variables_scope_bytes_covered,\n variables_scope_bytes, variables_coverage_map)\n\n# Parse the program arguments.\ndef parse_program_args(parser):\n parser.add_argument('--only-variables', action='store_true', default=False,\n help='calculate the location statistics only for local variables')\n parser.add_argument('--only-formal-parameters', action='store_true',\n default=False,\n help='calculate the location statistics only for formal parameters')\n parser.add_argument('--ignore-debug-entry-values', action='store_true',\n default=False,\n help='ignore the location statistics on locations with '\n 'entry values')\n parser.add_argument('--draw-plot', action='store_true', default=False,\n help='show histogram of location buckets generated (requires '\n 'matplotlib)')\n parser.add_argument('file_name', type=str, help='file to process')\n\n return parser.parse_args()\n\n# Verify that the program inputs meet the requirements.\ndef verify_program_inputs(opts):\n if len(sys.argv) < 2:\n print ('error: Too few arguments.')\n return False\n\n if opts.only_variables and opts.only_formal_parameters:\n print ('error: Please use just one --only* option.')\n return False\n\n return True\n\ndef Main():\n parser = argparse.ArgumentParser()\n opts = parse_program_args(parser)\n\n if not verify_program_inputs(opts):\n parser.print_help()\n sys.exit(1)\n\n binary = opts.file_name\n locstats = parse_locstats(opts, binary)\n\n if opts.draw_plot:\n # Draw a histogram representing the location buckets.\n locstats.draw_plot()\n else:\n # Pretty print collected info on the standard output.\n if locstats.pretty_print() == -1:\n sys.exit(0)\n\nif __name__ == '__main__':\n Main()\n sys.exit(0)\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gca", "matplotlib.pyplot.xticks" ] ]
ajeffries0492/DocProj
[ "75ac03ed195f966b607a45b04c7dd76f18efe6d6" ]
[ "eval.py" ]
[ "import os\r\nimport cv2\r\nimport shutil\r\nimport numpy as np\r\nimport skimage\r\nfrom skimage import io\r\nfrom skimage import transform as tf\r\nfrom skimage import img_as_ubyte\r\nimport argparse\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nfrom torchvision import transforms\r\nfrom collections import OrderedDict\r\n\r\nfrom modelGeoNet import GeoNet\r\n\r\n# For parsing commandline arguments\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument(\"--imgPath\", type=str, default='./IMG_1164.jpg', help='input image path')\r\nparser.add_argument(\"--modelPath\", type=str, default='./model_GeoNet.pkl', help='pre-trained model path')\r\nparser.add_argument(\"--saveImgPath\", type=str, default='./IMG_1164_resized.png', help='resized image path')\r\nparser.add_argument(\"--saveFlowPath\", type=str, default='./IMG_1164.npy', help='saved flows path')\r\nargs = parser.parse_args()\r\n\r\ndef resizeImg(imgPath, H, W):\r\n \r\n '''\r\n resize while keeping the aspect ratio and then crop the image to a given shape (H, W)\r\n '''\r\n\r\n img = io.imread(imgPath)\r\n h, w = img.shape[0:2]\r\n \r\n if h > w:\r\n ratio = float(h)/float(w)\r\n\r\n if (ratio > float(H)/float(W)):\r\n img = skimage.transform.resize(img, (int(ratio*W), W), order=1)\r\n else:\r\n img = skimage.transform.resize(img, (H, int(H/ratio)), order=1)\r\n\r\n yc = int(img.shape[0]/2)\r\n xc = int(img.shape[1]/2)\r\n img = img[yc - int(H/2):yc + int(H/2), xc - int(W/2):xc + int(W/2)]\r\n \r\n else:\r\n ratio = float(w)/float(h)\r\n \r\n if (ratio > float(H)/float(W)):\r\n img = skimage.transform.resize(img, (W, int(W*ratio)), order=1)\r\n else:\r\n img = skimage.transform.resize(img, (int(H/ratio), H), order=1)\r\n \r\n yc = int(img.shape[0]/2)\r\n xc = int(img.shape[1]/2)\r\n img = img[yc - int(W/2):yc + int(W/2), xc - int(H/2):xc + int(H/2)]\r\n \r\n return img\r\n\r\ndef padImg(img):\r\n '''\r\n pad image twice.\r\n The first padding is to make sure the patches cover all image regions.\r\n The second padding is used for cropping the global patch.\r\n '''\r\n \r\n H = img.shape[0]\r\n W = img.shape[1]\r\n \r\n globalFct = 4\r\n patchRes = 256\r\n ovlp = int(patchRes * 0.25)\r\n \r\n padH = (int((H - patchRes)/(patchRes - ovlp) + 1) * (patchRes - ovlp) + patchRes) - H\r\n padW = (int((W - patchRes)/(patchRes - ovlp) + 1) * (patchRes - ovlp) + patchRes) - W\r\n \r\n padding = int(patchRes * (globalFct - 1) / 2.0)\r\n\r\n padImg = cv2.copyMakeBorder(img, 0, padH, 0, padW, cv2.BORDER_REPLICATE)\r\n padImg = cv2.copyMakeBorder(padImg, padding, padding, padding, padding, cv2.BORDER_REPLICATE)\r\n \r\n return padImg\r\n\r\ndef cropToPatch(img):\r\n '''\r\n crop the image to local and global patches\r\n '''\r\n\r\n H = img.shape[0]\r\n W = img.shape[1]\r\n \r\n globalFct = 4\r\n patchRes = 256\r\n ovlp = int(patchRes * 0.25)\r\n padding = int(patchRes * (globalFct - 1) / 2.0)\r\n\r\n cropH = patchRes\r\n cropW = patchRes\r\n\r\n ynum = int((H - (globalFct - 1) * cropH - cropH)/(cropH - ovlp)) + 1\r\n xnum = int((W - (globalFct - 1) * cropW - cropW)/(cropW - ovlp)) + 1\r\n \r\n totalLocal = np.zeros((ynum, xnum, patchRes, patchRes, 3), dtype=np.uint8)\r\n totalGloba = np.zeros((ynum, xnum, 256, 256, 3), dtype=np.uint8)\r\n\r\n for j in range(0, ynum):\r\n for i in range(0, xnum):\r\n\r\n x = int(padding + i * (cropW - ovlp))\r\n y = int(padding + j * (cropH - ovlp))\r\n\r\n totalLocal[j, i] = img[y:int(y + patchRes), x:int(x + patchRes)]\r\n\r\n gx = int(x - padding)\r\n gy = int(y - 
padding)\r\n globalpatch = img[gy:int(gy + globalFct * patchRes), gx:int(gx + globalFct * patchRes)]\r\n globalpatch = skimage.transform.resize(globalpatch, (256, 256)) * 255.0\r\n totalGloba[j, i] = globalpatch\r\n \r\n return totalLocal, totalGloba\r\n\r\n\r\n\r\ndef testRealFlow(modelPath, localPatch, globalPatch):\r\n '''\r\n estimate the flows\r\n '''\r\n\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\r\n\r\n model = GeoNet([1, 1, 1, 1, 1])\r\n\r\n if torch.cuda.is_available():\r\n model = model.cuda()\r\n \r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n model.load_state_dict(torch.load(modelPath))\r\n else:\r\n state_dict = torch.load(modelPath)\r\n new_state_dict = OrderedDict()\r\n for k, v in state_dict.items():\r\n name = k[7:]\r\n new_state_dict[name] = v\r\n model.load_state_dict(new_state_dict) \r\n\r\n \r\n \r\n model.eval()\r\n \r\n ynum = localPatch.shape[0]\r\n xnum = localPatch.shape[1]\r\n scal = localPatch.shape[2]\r\n \r\n totalFlow = np.zeros((ynum, xnum, 2, scal, scal), dtype = np.float32)\r\n \r\n for j in range(0, ynum):\r\n for i in range(0, xnum):\r\n\r\n temp_localPatch = localPatch[j, i]\r\n temp_globaPatch = globalPatch[j, i]\r\n \r\n temp_localPatch = transform(temp_localPatch)\r\n temp_globaPatch = transform(temp_globaPatch)\r\n\r\n if torch.cuda.is_available():\r\n temp_localPatch = temp_localPatch.cuda()\r\n temp_globaPatch = temp_globaPatch.cuda()\r\n\r\n temp_localPatch = temp_localPatch.view(1,3,scal,scal)\r\n temp_globaPatch = temp_globaPatch.view(1,3,256,256)\r\n \r\n temp_localPatch = Variable(temp_localPatch)\r\n temp_globaPatch = Variable(temp_globaPatch)\r\n \r\n flow_output = model(temp_localPatch, temp_globaPatch)\r\n\r\n u = flow_output.data.cpu().numpy()[0][0]\r\n v = flow_output.data.cpu().numpy()[0][1]\r\n \r\n totalFlow[j,i,0] = u\r\n totalFlow[j,i,1] = v\r\n\r\n return totalFlow\r\n\r\n\r\nimg = resizeImg(args.imgPath, H = 2000, W = 1500)\r\n#io.imsave(args.saveImgPath, img_as_ubyte(img))\r\nio.imsave(args.saveImgPath, img)\r\nimg = padImg(img)\r\ntotalLocalPatch, totalGlobaPatch = cropToPatch(img)\r\ntotalFlow = testRealFlow(args.modelPath, totalLocalPatch, totalGlobaPatch)\r\nnp.save(args.saveFlowPath, totalFlow)\r\n" ]
[ [ "numpy.zeros", "torch.autograd.Variable", "numpy.save", "torch.cuda.device_count", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel" ] ]
cuihantao/steps
[ "60327bf42299cb7117ed5907a931583d7cdf590d" ]
[ "python/stepspy-current/stepspy/pouch.py" ]
[ "#coding=utf-8\nimport time\nimport struct\nimport csv\nimport os.path\nimport math\n\ntry:\n import numpy\nexcept ImportError:\n print(\"pouch is dependent on module numpy which is missing. please install numpy before use pouch\")\n\ndef __is_filename_ends_with(filename, surfix):\n filename = filename.upper()\n surfix = surfix.upper()\n if filename.endswith(surfix):\n return True\n else:\n return False\n\ndef __save_data(file_name, dy_time, dy_value, dy_channel):\n with open(file_name,'wt') as fid:\n writer = csv.writer(fid)\n channels = ['Time'.encode('utf-8')]\n for chan in dy_channel:\n channels.append(chan.encode('utf-8'))\n writer.writerow(channels)\n for i in range(len(dy_time)):\n value = [dy_time[i]]\n for j in range(len(dy_channel)):\n value.append(dy_value[i,j])\n writer.writerow(value)\n \ndef POUCH(file_name, type, save_or_not=False, show_log=True):\n type = type.upper()\n\n if type in [\"CSV\"]:\n return POUCH_CSV(file_name, show_log)\n \n if type in [\"STEPS\"]:\n return POUCH_STEPS(file_name, save_or_not, show_log)\n print(\"Power System Simulator type invalid\")\n return numpy.array([]), numpy.array([]), []\n \ndef POUCH_CSV(file_name, show_log=True):\n \"\"\"\n Usage:\n dy_time, dy_value, dy_channel = POUCH_CSV(file_name, show_log=True)\n file_name: csv file ending with '.csv'. the first line of the csv file should be channels' name quoted by '\"'\n save_or_not: logic. True for saving to csv file, False for ignoring saving\n show_log: logic. True for showing log, False for diabling log\n dy_time: numpy array of simulation time\n dy_value: numpy array of values of all channels\n dy_channel: list of name of all channels\n \"\"\"\n if show_log==True:\n info = \"CSV File Conversion Program V1.2.1 (2019/07/05)\\n\"\n info += \"Supports: Common CSV files with header, or Power Factory 15.1\\n\"\n info += \"Changgang Li ([email protected])\"\n\n print(info) \n \n if not os.path.exists(file_name):\n info = '**** There is no csv file '+file_name+'.\\n**** Please check csv file.'\n print(info)\n return numpy.array([]), numpy.array([]), []\n \n dy_time = []\n dy_value = []\n dy_channel = []\n \n if not __is_filename_ends_with(file_name, '.csv'):\n info = '**** '+file_name+' is not ending with .csv. Please check CSV file name.'\n print(info)\n return numpy.array(dy_time), numpy.array(dy_value), dy_channel\n\n start_time = time.clock()\n \n with open(file_name, 'rt') as fid:\n data = fid.readline()\n data = data[0:-1] # remove the last \\n char\n data = data.replace('\"','')\n channels = data.split(',')\n for i in range(len(channels)):\n channel = channels[i]\n channel = channel.strip('\"')\n channel = channel.strip()\n channels[i] = channel\n \n dy_channel = channels[1:len(channels)+1]\n \n reader = csv.reader(fid, quoting=csv.QUOTE_NONNUMERIC)\n for data in reader:\n dy_time.append(data[0])\n dy_value.append(data[1:len(data)+1])\n if show_log==True:\n end_time = time.clock()\n time_elapsed = end_time - start_time\n \n info = 'Conversion finished in '+str(float(int(time_elapsed*1000.0))*0.001)+'s'\n print(info)\n\n return numpy.array(dy_time), (numpy.array(dy_value)), dy_channel\n \ndef POUCH_STEPS(file_name, save_or_not=False, show_log=True):\n \"\"\"\n Usage:\n dy_time, dy_value, dy_channel = POUCH_STEPS(file_name, save_or_not=False, show_log=True)\n file_name: STEPS binary file ending with '.bin'\n save_or_not: logic. True for saving to csv file, False for ignoring saving\n show_log: logic. 
True for showing log, False for diabling log\n dy_time: numpy array of simulation time\n dy_value: numpy array of values of all channels\n dy_channel: list of name of all channels\n \"\"\"\n if show_log==True:\n info = \"STEPS Bin File Conversion Program V0.0.2 (2019/07/05)\\n\"\n info += \"Supports: STEPS 20190416\\n\"\n info += \"Changgang Li ([email protected])\"\n print(info)\n \n if not os.path.exists(file_name):\n info = '**** There is no bin file '+file_name+'.\\n**** Please check STEPS bin file.'\n print(info)\n return numpy.array([]), numpy.array([]), []\n \n dy_time = []\n dy_value = []\n dy_channel = []\n \n if not __is_filename_ends_with(file_name, '.bin'):\n info = '**** '+file_name+' is not ending with .bin. Please check STEPS bin file name.'\n print(info)\n return numpy.array(dy_time), numpy.array(dy_value), dy_channel\n\n start_time = time.clock()\n \n fid = open(file_name, 'rb')\n steps_bin_version = fid.read(4)\n steps_bin_version = struct.unpack('I', steps_bin_version)\n steps_bin_version = steps_bin_version[0]\n fid.close()\n \n if steps_bin_version==0:\n dy_time, dy_value, dy_channel = __POUCH_STEPS_0(file_name, show_log)\n \n if save_or_not == True:\n file_name = file_name+'.csv'\n __save_data(file_name, dy_time, dy_value, dy_channel)\n \n \n end_time = time.clock()\n time_elapsed = end_time - start_time\n if show_log==True:\n info = 'Conversion finished in '+str(float(int(time_elapsed*1000.0))*0.001)+'s'\n print(info)\n \n return dy_time, dy_value, dy_channel\n\ndef __POUCH_STEPS_0(file_name, show_log=True):\n dy_time = []\n dy_value = []\n dy_channel = []\n \n with open(file_name, 'rb') as fid:\n steps_bin_version = fid.read(4) # get STEPS version\n steps_bin_version = struct.unpack('I', steps_bin_version)\n steps_bin_version = steps_bin_version[0]\n \n case_time = fid.read(4*6) # get case time\n case_time = struct.unpack('6I', case_time)\n case_year = case_time[0]\n case_month = case_time[1]\n case_day = case_time[2]\n case_hour = case_time[3]\n case_minute = case_time[4]\n case_second = case_time[5]\n print(case_time)\n print(case_year, case_month, case_day, case_hour, case_minute, case_second)\n \n float_size = fid.read(4) # get float size\n float_size = struct.unpack('I',float_size)\n float_size = float_size[0]\n \n n_channels = fid.read(4) # get 4 bytes: channel count\n n_channels = struct.unpack('I',n_channels)\n n_channels = n_channels[0]\n n_channel_bytes = fid.read(4) # get 4 bytes: channel name bytes count\n n_channel_bytes = struct.unpack('I',n_channel_bytes)\n n_channel_bytes = n_channel_bytes[0]\n\n channels = fid.read(n_channel_bytes) # bytes of channel names\n channels = channels.decode(\"cp936\")\n channels = channels.strip()\n\n channels = channels.split('\\n')\n for i in range(1,n_channels):\n dy_channel.append(channels[i])\n \n while True: # loop for each time point\n data = fid.read(n_channels*float_size) # get values\n if len(data)<n_channels*float_size:\n break\n if float_size==4:\n data = struct.unpack('f'*n_channels, data)\n else:\n data = struct.unpack('d'*n_channels, data)\n t = data[0]\n value = data[1:n_channels]\n dy_time.append(t)\n dy_value.append(value) \n\n return dy_time, dy_value, dy_channel\n" ]
[ [ "numpy.array" ] ]
mightmay/Mien-TTS
[ "8a22ff0a79558b3cf4981ce1b63f4d1485ea6338" ]
[ "TTS/tts/tf/utils/convert_torch_to_tf_utils.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n# NOTE: linter has a problem with the current TF release\n#pylint: disable=no-value-for-parameter\n#pylint: disable=unexpected-keyword-arg\n\ndef tf_create_dummy_inputs():\n \"\"\" Create dummy inputs for TF Tacotron2 model \"\"\"\n batch_size = 4\n max_input_length = 32\n max_mel_length = 128\n pad = 1\n n_chars = 24\n input_ids = tf.random.uniform([batch_size, max_input_length + pad], maxval=n_chars, dtype=tf.int32)\n input_lengths = np.random.randint(0, high=max_input_length+1 + pad, size=[batch_size])\n input_lengths[-1] = max_input_length\n input_lengths = tf.convert_to_tensor(input_lengths, dtype=tf.int32)\n mel_outputs = tf.random.uniform(shape=[batch_size, max_mel_length + pad, 80])\n mel_lengths = np.random.randint(0, high=max_mel_length+1 + pad, size=[batch_size])\n mel_lengths[-1] = max_mel_length\n mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)\n return input_ids, input_lengths, mel_outputs, mel_lengths\n\n\ndef compare_torch_tf(torch_tensor, tf_tensor):\n \"\"\" Compute the average absolute difference b/w torch and tf tensors \"\"\"\n return abs(torch_tensor.detach().numpy() - tf_tensor.numpy()).mean()\n\n\ndef convert_tf_name(tf_name):\n \"\"\" Convert certain patterns in TF layer names to Torch patterns \"\"\"\n tf_name_tmp = tf_name\n tf_name_tmp = tf_name_tmp.replace(':0', '')\n tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_1/recurrent_kernel', '/weight_hh_l0')\n tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_2/kernel', '/weight_ih_l1')\n tf_name_tmp = tf_name_tmp.replace('/recurrent_kernel', '/weight_hh')\n tf_name_tmp = tf_name_tmp.replace('/kernel', '/weight')\n tf_name_tmp = tf_name_tmp.replace('/gamma', '/weight')\n tf_name_tmp = tf_name_tmp.replace('/beta', '/bias')\n tf_name_tmp = tf_name_tmp.replace('/', '.')\n return tf_name_tmp\n\n\ndef transfer_weights_torch_to_tf(tf_vars, var_map_dict, state_dict):\n \"\"\" Transfer weigths from torch state_dict to TF variables \"\"\"\n print(\" > Passing weights from Torch to TF ...\")\n for tf_var in tf_vars:\n torch_var_name = var_map_dict[tf_var.name]\n print(f' | > {tf_var.name} <-- {torch_var_name}')\n # if tuple, it is a bias variable\n if not isinstance(torch_var_name, tuple):\n torch_layer_name = '.'.join(torch_var_name.split('.')[-2:])\n torch_weight = state_dict[torch_var_name]\n if 'convolution1d/kernel' in tf_var.name or 'conv1d/kernel' in tf_var.name:\n # out_dim, in_dim, filter -> filter, in_dim, out_dim\n numpy_weight = torch_weight.permute([2, 1, 0]).detach().cpu().numpy()\n elif 'lstm_cell' in tf_var.name and 'kernel' in tf_var.name:\n numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()\n # if variable is for bidirectional lstm and it is a bias vector there\n # needs to be pre-defined two matching torch bias vectors\n elif '_lstm/lstm_cell_' in tf_var.name and 'bias' in tf_var.name:\n bias_vectors = [value for key, value in state_dict.items() if key in torch_var_name]\n assert len(bias_vectors) == 2\n numpy_weight = bias_vectors[0] + bias_vectors[1]\n elif 'rnn' in tf_var.name and 'kernel' in tf_var.name:\n numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()\n elif 'rnn' in tf_var.name and 'bias' in tf_var.name:\n bias_vectors = [value for key, value in state_dict.items() if torch_var_name[:-2] in key]\n assert len(bias_vectors) == 2\n numpy_weight = bias_vectors[0] + bias_vectors[1]\n elif 'linear_layer' in torch_layer_name and 'weight' in torch_var_name:\n numpy_weight = 
torch_weight.transpose(0, 1).detach().cpu().numpy()\n else:\n numpy_weight = torch_weight.detach().cpu().numpy()\n assert np.all(tf_var.shape == numpy_weight.shape), f\" [!] weight shapes does not match: {tf_var.name} vs {torch_var_name} --> {tf_var.shape} vs {numpy_weight.shape}\"\n tf.keras.backend.set_value(tf_var, numpy_weight)\n return tf_vars\n\n\ndef load_tf_vars(model_tf, tf_vars):\n for tf_var in tf_vars:\n model_tf.get_layer(tf_var.name).set_weights(tf_var)\n return model_tf\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.keras.backend.set_value", "tensorflow.random.uniform", "numpy.random.randint", "numpy.all" ] ]
mikael-jorda/geomstats-robotic-example
[ "e51634ade6b2dbae8103cc987fac6874daa5a1ef" ]
[ "robot_so3.py" ]
[ "\"\"\"\nGenerate a geodesic of SO(3) equipped\nwith its left-invariant canonical METRIC\nfor trajectory generation for a robotic manipulator\nand sends the trajectory through a redis server at a selected rate\n\"\"\"\n\nimport time\n\nfrom geomstats.special_orthogonal_group import SpecialOrthogonalGroup\n\nimport numpy as np\n\nimport redis\n\nSO3_GROUP = SpecialOrthogonalGroup(n=3)\nMETRIC = SO3_GROUP.bi_invariant_metric\n\nredis_server = redis.StrictRedis(host='localhost', port=6379, db=0)\nDESIRED_ORIENTATION_KEY = \"geomstats_examples::desired_orientation\"\nDESIRED_POSITION_KEY = \"geomstats_examples::desired_position\"\n\ntrajectory_time_seconds = 5.0\nloop_frequency_Hz = 100.0\n\n\ndef decode_vector_redis(redis_key):\n \"\"\"\n reads a the value corresponding to 'redis_key'\n from the redis server and returns it as a np 1D array\n \"\"\"\n return np.array(str(redis_server.get(redis_key)).split('\\'')[1].split(' '))\n\n\ndef decode_matrix_redis(redis_key):\n \"\"\"\n reads a the value corresponding to 'redis_key'\n from the redis server and returns it as a 2D array\n \"\"\"\n lines = str(redis_server.get(redis_key)).split('\\'')[1].split('; ')\n matrix = np.array([x.split(' ') for x in lines])\n\n return matrix.astype(np.float)\n\n\ndef encode_matrix_redis(redis_key, mat):\n \"\"\"\n writes a np 2D array 'mat' to the redis server\n using the key 'redis_key'\n \"\"\"\n s = ''\n\n for j in range(mat.shape[1]):\n s += str(mat[0][j])\n s += ' '\n s = s[0:-1]\n s += '; '\n\n for i in range(1, mat.shape[0]):\n for j in range(mat.shape[1]):\n s += str(mat[i][j])\n s += ' '\n s = s[0:-1]\n s += '; '\n s = s[0:-2]\n\n redis_server.set(redis_key, s)\n\n\ndef main():\n\n initial_orientation = decode_matrix_redis(DESIRED_ORIENTATION_KEY)\n initial_point = SO3_GROUP.rotation_vector_from_matrix(initial_orientation)\n\n final_orientation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n final_point = SO3_GROUP.rotation_vector_from_matrix(final_orientation)\n\n geodesic = METRIC.geodesic(initial_point=initial_point,\n end_point=final_point)\n\n n_steps = int(trajectory_time_seconds * loop_frequency_Hz)\n t = np.linspace(0, 1, n_steps)\n current_step = 0\n\n points = geodesic(t)\n\n period = 1.0 / loop_frequency_Hz\n t_init = time.time()\n t = t_init\n\n while(current_step < n_steps):\n t += period\n\n current_point = points[current_step]\n rot_desired = SO3_GROUP.matrix_from_rotation_vector(current_point)[0]\n encode_matrix_redis(DESIRED_ORIENTATION_KEY, rot_desired)\n\n current_step = current_step + 1\n time.sleep(max(0.0, t - time.time()))\n\n elapsed_time = time.time() - t_init\n print(\"Elapsed time : \", elapsed_time, \" seconds\")\n print(\"Loop cycles : \", current_step)\n print(\"Frequency : \", current_step / elapsed_time, \" Hz\")\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.linspace" ] ]
zeitlinv/pandas
[ "08d296f1278e08b407448c95086589e1d10285f9" ]
[ "pandas/tests/window/test_groupby.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.groupby.groupby import get_groupby\n\n\nclass TestRolling:\n def setup_method(self):\n self.frame = DataFrame({\"A\": [1] * 20 + [2] * 12 + [3] * 8, \"B\": np.arange(40)})\n\n def test_mutated(self):\n\n msg = r\"groupby\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n self.frame.groupby(\"A\", foo=1)\n\n g = self.frame.groupby(\"A\")\n assert not g.mutated\n g = get_groupby(self.frame, by=\"A\", mutated=True)\n assert g.mutated\n\n def test_getitem(self):\n g = self.frame.groupby(\"A\")\n g_mutated = get_groupby(self.frame, by=\"A\", mutated=True)\n\n expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())\n\n result = g.rolling(2).mean().B\n tm.assert_series_equal(result, expected)\n\n result = g.rolling(2).B.mean()\n tm.assert_series_equal(result, expected)\n\n result = g.B.rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n result = self.frame.B.groupby(self.frame.A).rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n def test_getitem_multiple(self):\n\n # GH 13174\n g = self.frame.groupby(\"A\")\n r = g.rolling(2, min_periods=0)\n g_mutated = get_groupby(self.frame, by=\"A\", mutated=True)\n expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count())\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"f\",\n [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n pytest.param(\n \"count\",\n marks=pytest.mark.filterwarnings(\"ignore:min_periods:FutureWarning\"),\n ),\n \"kurt\",\n \"skew\",\n ],\n )\n def test_rolling(self, f):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.rolling(4), f)())\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"std\", \"var\"])\n def test_rolling_ddof(self, f):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)(ddof=1)\n expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"interpolation\", [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]\n )\n def test_rolling_quantile(self, interpolation):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = r.quantile(0.4, interpolation=interpolation)\n expected = g.apply(\n lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)\n )\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"corr\", \"cov\"])\n def test_rolling_corr_cov(self, f):\n g = 
self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.rolling(4), f)(self.frame)\n\n expected = g.apply(func)\n # GH 39591: The grouped column should be all np.nan\n # (groupby.apply inserts 0s for cov)\n expected[\"A\"] = np.nan\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.rolling(4), f)(pairwise=True)\n\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_apply(self, raw):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n def test_rolling_apply_mutability(self):\n # GH 14013\n df = DataFrame({\"A\": [\"foo\"] * 3 + [\"bar\"] * 3, \"B\": [1] * 6})\n g = df.groupby(\"A\")\n\n mi = MultiIndex.from_tuples(\n [(\"bar\", 3), (\"bar\", 4), (\"bar\", 5), (\"foo\", 0), (\"foo\", 1), (\"foo\", 2)]\n )\n\n mi.names = [\"A\", None]\n # Grouped column should not be a part of the output\n expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=[\"B\"], index=mi)\n\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n # Call an arbitrary function on the groupby\n g.sum()\n\n # Make sure nothing has been mutated\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"expected_value,raw_value\", [[1.0, True], [0.0, False]])\n def test_groupby_rolling(self, expected_value, raw_value):\n # GH 31754\n\n def foo(x):\n return int(isinstance(x, np.ndarray))\n\n df = DataFrame({\"id\": [1, 1, 1], \"value\": [1, 2, 3]})\n result = df.groupby(\"id\").value.rolling(1).apply(foo, raw=raw_value)\n expected = Series(\n [expected_value] * 3,\n index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=[\"id\", None]),\n name=\"value\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_center_center(self):\n # GH 35552\n series = Series(range(1, 6))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 5,\n index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),\n )\n tm.assert_series_equal(result, expected)\n\n series = Series(range(1, 5))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 4,\n index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),\n )\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({\"a\": [\"a\"] * 5 + [\"b\"] * 6, \"b\": range(11)})\n result = df.groupby(\"a\").rolling(center=True, window=3).mean()\n expected = DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],\n index=MultiIndex.from_tuples(\n (\n (\"a\", 0),\n (\"a\", 1),\n (\"a\", 2),\n (\"a\", 3),\n (\"a\", 4),\n (\"b\", 5),\n (\"b\", 6),\n (\"b\", 7),\n (\"b\", 8),\n (\"b\", 9),\n (\"b\", 10),\n ),\n names=[\"a\", None],\n ),\n columns=[\"b\"],\n )\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\": [\"a\"] * 5 + [\"b\"] * 5, \"b\": range(10)})\n result = df.groupby(\"a\").rolling(center=True, window=3).mean()\n expected = DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 
6, 7, 8, np.nan],\n index=MultiIndex.from_tuples(\n (\n (\"a\", 0),\n (\"a\", 1),\n (\"a\", 2),\n (\"a\", 3),\n (\"a\", 4),\n (\"b\", 5),\n (\"b\", 6),\n (\"b\", 7),\n (\"b\", 8),\n (\"b\", 9),\n ),\n names=[\"a\", None],\n ),\n columns=[\"b\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_center_on(self):\n # GH 37141\n df = DataFrame(\n data={\n \"Date\": date_range(\"2020-01-01\", \"2020-01-10\"),\n \"gb\": [\"group_1\"] * 6 + [\"group_2\"] * 4,\n \"value\": range(10),\n }\n )\n result = (\n df.groupby(\"gb\")\n .rolling(6, on=\"Date\", center=True, min_periods=1)\n .value.mean()\n )\n expected = Series(\n [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5],\n name=\"value\",\n index=MultiIndex.from_tuples(\n (\n (\"group_1\", Timestamp(\"2020-01-01\")),\n (\"group_1\", Timestamp(\"2020-01-02\")),\n (\"group_1\", Timestamp(\"2020-01-03\")),\n (\"group_1\", Timestamp(\"2020-01-04\")),\n (\"group_1\", Timestamp(\"2020-01-05\")),\n (\"group_1\", Timestamp(\"2020-01-06\")),\n (\"group_2\", Timestamp(\"2020-01-07\")),\n (\"group_2\", Timestamp(\"2020-01-08\")),\n (\"group_2\", Timestamp(\"2020-01-09\")),\n (\"group_2\", Timestamp(\"2020-01-10\")),\n ),\n names=[\"gb\", \"Date\"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"min_periods\", [5, 4, 3])\n def test_groupby_rolling_center_min_periods(self, min_periods):\n # GH 36040\n df = DataFrame({\"group\": [\"A\"] * 10 + [\"B\"] * 10, \"data\": range(20)})\n\n window_size = 5\n result = (\n df.groupby(\"group\")\n .rolling(window_size, center=True, min_periods=min_periods)\n .mean()\n )\n result = result.reset_index()[[\"group\", \"data\"]]\n\n grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]\n grp_B_mean = [x + 10.0 for x in grp_A_mean]\n\n num_nans = max(0, min_periods - 3) # For window_size of 5\n nans = [np.nan] * num_nans\n grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans\n grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans\n\n expected = DataFrame(\n {\"group\": [\"A\"] * 10 + [\"B\"] * 10, \"data\": grp_A_expected + grp_B_expected}\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_subselect_rolling(self):\n # GH 35486\n df = DataFrame(\n {\"a\": [1, 2, 3, 2], \"b\": [4.0, 2.0, 3.0, 1.0], \"c\": [10, 20, 30, 20]}\n )\n result = df.groupby(\"a\")[[\"b\"]].rolling(2).max()\n expected = DataFrame(\n [np.nan, np.nan, 2.0, np.nan],\n columns=[\"b\"],\n index=MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=[\"a\", None]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"a\")[\"b\"].rolling(2).max()\n expected = Series(\n [np.nan, np.nan, 2.0, np.nan],\n index=MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=[\"a\", None]\n ),\n name=\"b\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_custom_indexer(self):\n # GH 35557\n class SimpleIndexer(BaseIndexer):\n def get_window_bounds(\n self, num_values=0, min_periods=None, center=None, closed=None\n ):\n min_periods = self.window_size if min_periods is None else 0\n end = np.arange(num_values, dtype=np.int64) + 1\n start = end.copy() - self.window_size\n start[start < 0] = min_periods\n return start, end\n\n df = DataFrame(\n {\"a\": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5\n )\n result = (\n df.groupby(df.index)\n .rolling(SimpleIndexer(window_size=3), min_periods=1)\n .sum()\n )\n expected = df.groupby(df.index).rolling(window=3, 
min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_subset_with_closed(self):\n # GH 35549\n df = DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": [Timestamp(\"2019-01-01\")] * 6,\n }\n )\n result = (\n df.groupby(\"group\").rolling(\"1D\", on=\"date\", closed=\"left\")[\"column1\"].sum()\n )\n expected = Series(\n [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],\n index=MultiIndex.from_tuples(\n [(\"A\", Timestamp(\"2019-01-01\"))] * 3\n + [(\"B\", Timestamp(\"2019-01-01\"))] * 3,\n names=[\"group\", \"date\"],\n ),\n name=\"column1\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_subset_rolling_subset_with_closed(self):\n # GH 35549\n df = DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": [Timestamp(\"2019-01-01\")] * 6,\n }\n )\n\n result = (\n df.groupby(\"group\")[[\"column1\", \"date\"]]\n .rolling(\"1D\", on=\"date\", closed=\"left\")[\"column1\"]\n .sum()\n )\n expected = Series(\n [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],\n index=MultiIndex.from_tuples(\n [(\"A\", Timestamp(\"2019-01-01\"))] * 3\n + [(\"B\", Timestamp(\"2019-01-01\"))] * 3,\n names=[\"group\", \"date\"],\n ),\n name=\"column1\",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"func\", [\"max\", \"min\"])\n def test_groupby_rolling_index_changed(self, func):\n # GH: #36018 nlevels of MultiIndex changed\n ds = Series(\n [1, 2, 2],\n index=MultiIndex.from_tuples(\n [(\"a\", \"x\"), (\"a\", \"y\"), (\"c\", \"z\")], names=[\"1\", \"2\"]\n ),\n name=\"a\",\n )\n\n result = getattr(ds.groupby(ds).rolling(2), func)()\n expected = Series(\n [np.nan, np.nan, 2.0],\n index=MultiIndex.from_tuples(\n [(1, \"a\", \"x\"), (2, \"a\", \"y\"), (2, \"c\", \"z\")], names=[\"a\", \"1\", \"2\"]\n ),\n name=\"a\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_empty_frame(self):\n # GH 36197\n expected = DataFrame({\"s1\": []})\n result = expected.groupby(\"s1\").rolling(window=1).sum()\n # GH 32262\n expected = expected.drop(columns=\"s1\")\n # GH-38057 from_tuples gives empty object dtype, we now get float/int levels\n # expected.index = MultiIndex.from_tuples([], names=[\"s1\", None])\n expected.index = MultiIndex.from_product(\n [Index([], dtype=\"float64\"), Index([], dtype=\"int64\")], names=[\"s1\", None]\n )\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame({\"s1\": [], \"s2\": []})\n result = expected.groupby([\"s1\", \"s2\"]).rolling(window=1).sum()\n # GH 32262\n expected = expected.drop(columns=[\"s1\", \"s2\"])\n expected.index = MultiIndex.from_product(\n [\n Index([], dtype=\"float64\"),\n Index([], dtype=\"float64\"),\n Index([], dtype=\"int64\"),\n ],\n names=[\"s1\", \"s2\", None],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_string_index(self):\n # GH: 36727\n df = DataFrame(\n [\n [\"A\", \"group_1\", Timestamp(2019, 1, 1, 9)],\n [\"B\", \"group_1\", Timestamp(2019, 1, 2, 9)],\n [\"Z\", \"group_2\", Timestamp(2019, 1, 3, 9)],\n [\"H\", \"group_1\", Timestamp(2019, 1, 6, 9)],\n [\"E\", \"group_2\", Timestamp(2019, 1, 20, 9)],\n ],\n columns=[\"index\", \"group\", \"eventTime\"],\n ).set_index(\"index\")\n\n groups = df.groupby(\"group\")\n df[\"count_to_date\"] = groups.cumcount()\n rolling_groups = groups.rolling(\"10d\", on=\"eventTime\")\n result = rolling_groups.apply(lambda df: df.shape[0])\n expected = DataFrame(\n [\n [\"A\", \"group_1\", 
Timestamp(2019, 1, 1, 9), 1.0],\n [\"B\", \"group_1\", Timestamp(2019, 1, 2, 9), 2.0],\n [\"H\", \"group_1\", Timestamp(2019, 1, 6, 9), 3.0],\n [\"Z\", \"group_2\", Timestamp(2019, 1, 3, 9), 1.0],\n [\"E\", \"group_2\", Timestamp(2019, 1, 20, 9), 1.0],\n ],\n columns=[\"index\", \"group\", \"eventTime\", \"count_to_date\"],\n ).set_index([\"group\", \"index\"])\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_no_sort(self):\n # GH 36889\n result = (\n DataFrame({\"foo\": [2, 1], \"bar\": [2, 1]})\n .groupby(\"foo\", sort=False)\n .rolling(1)\n .min()\n )\n expected = DataFrame(\n np.array([[2.0, 2.0], [1.0, 1.0]]),\n columns=[\"foo\", \"bar\"],\n index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=[\"foo\", None]),\n )\n # GH 32262\n expected = expected.drop(columns=\"foo\")\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_count_closed_on(self):\n # GH 35869\n df = DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": date_range(end=\"20190101\", periods=6),\n }\n )\n result = (\n df.groupby(\"group\")\n .rolling(\"3d\", on=\"date\", closed=\"left\")[\"column1\"]\n .count()\n )\n expected = Series(\n [np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],\n name=\"column1\",\n index=MultiIndex.from_tuples(\n [\n (\"A\", Timestamp(\"2018-12-27\")),\n (\"A\", Timestamp(\"2018-12-29\")),\n (\"A\", Timestamp(\"2018-12-31\")),\n (\"B\", Timestamp(\"2018-12-28\")),\n (\"B\", Timestamp(\"2018-12-30\")),\n (\"B\", Timestamp(\"2019-01-01\")),\n ],\n names=[\"group\", \"date\"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"kwargs\"),\n [(\"rolling\", {\"window\": 2, \"min_periods\": 1}), (\"expanding\", {})],\n )\n def test_groupby_rolling_sem(self, func, kwargs):\n # GH: 26476\n df = DataFrame(\n [[\"a\", 1], [\"a\", 2], [\"b\", 1], [\"b\", 2], [\"b\", 3]], columns=[\"a\", \"b\"]\n )\n result = getattr(df.groupby(\"a\"), func)(**kwargs).sem()\n expected = DataFrame(\n {\"a\": [np.nan] * 5, \"b\": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},\n index=MultiIndex.from_tuples(\n [(\"a\", 0), (\"a\", 1), (\"b\", 2), (\"b\", 3), (\"b\", 4)], names=[\"a\", None]\n ),\n )\n # GH 32262\n expected = expected.drop(columns=\"a\")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n (\"rollings\", \"key\"), [({\"on\": \"a\"}, \"a\"), ({\"on\": None}, \"index\")]\n )\n def test_groupby_rolling_nans_in_index(self, rollings, key):\n # GH: 34617\n df = DataFrame(\n {\n \"a\": to_datetime([\"2020-06-01 12:00\", \"2020-06-01 14:00\", np.nan]),\n \"b\": [1, 2, 3],\n \"c\": [1, 1, 1],\n }\n )\n if key == \"index\":\n df = df.set_index(\"a\")\n with pytest.raises(ValueError, match=f\"{key} must be monotonic\"):\n df.groupby(\"c\").rolling(\"60min\", **rollings)\n\n @pytest.mark.parametrize(\"group_keys\", [True, False])\n def test_groupby_rolling_group_keys(self, group_keys):\n # GH 37641\n # GH 38523: GH 37641 actually was not a bug.\n # group_keys only applies to groupby.apply directly\n arrays = [[\"val1\", \"val1\", \"val2\"], [\"val1\", \"val1\", \"val2\"]]\n index = MultiIndex.from_arrays(arrays, names=(\"idx1\", \"idx2\"))\n\n s = Series([1, 2, 3], index=index)\n result = s.groupby([\"idx1\", \"idx2\"], group_keys=group_keys).rolling(1).mean()\n expected = Series(\n [1.0, 2.0, 3.0],\n index=MultiIndex.from_tuples(\n [\n (\"val1\", \"val1\", \"val1\", \"val1\"),\n (\"val1\", \"val1\", \"val1\", \"val1\"),\n (\"val2\", \"val2\", \"val2\", \"val2\"),\n ],\n 
names=[\"idx1\", \"idx2\", \"idx1\", \"idx2\"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_index_level_and_column_label(self):\n # The groupby keys should not appear as a resulting column\n arrays = [[\"val1\", \"val1\", \"val2\"], [\"val1\", \"val1\", \"val2\"]]\n index = MultiIndex.from_arrays(arrays, names=(\"idx1\", \"idx2\"))\n\n df = DataFrame({\"A\": [1, 1, 2], \"B\": range(3)}, index=index)\n result = df.groupby([\"idx1\", \"A\"]).rolling(1).mean()\n expected = DataFrame(\n {\"B\": [0.0, 1.0, 2.0]},\n index=MultiIndex.from_tuples(\n [\n (\"val1\", 1, \"val1\", \"val1\"),\n (\"val1\", 1, \"val1\", \"val1\"),\n (\"val2\", 2, \"val2\", \"val2\"),\n ],\n names=[\"idx1\", \"A\", \"idx1\", \"idx2\"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_resulting_multiindex(self):\n # a few different cases checking the created MultiIndex of the result\n # https://github.com/pandas-dev/pandas/pull/38057\n\n # grouping by 1 columns -> 2-level MI as result\n df = DataFrame({\"a\": np.arange(8.0), \"b\": [1, 2] * 4})\n result = df.groupby(\"b\").rolling(3).mean()\n expected_index = MultiIndex.from_tuples(\n [(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)],\n names=[\"b\", None],\n )\n tm.assert_index_equal(result.index, expected_index)\n\n # grouping by 2 columns -> 3-level MI as result\n df = DataFrame({\"a\": np.arange(12.0), \"b\": [1, 2] * 6, \"c\": [1, 2, 3, 4] * 3})\n result = df.groupby([\"b\", \"c\"]).rolling(2).sum()\n expected_index = MultiIndex.from_tuples(\n [\n (1, 1, 0),\n (1, 1, 4),\n (1, 1, 8),\n (1, 3, 2),\n (1, 3, 6),\n (1, 3, 10),\n (2, 2, 1),\n (2, 2, 5),\n (2, 2, 9),\n (2, 4, 3),\n (2, 4, 7),\n (2, 4, 11),\n ],\n names=[\"b\", \"c\", None],\n )\n tm.assert_index_equal(result.index, expected_index)\n\n # grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result\n df = DataFrame({\"a\": np.arange(8.0), \"b\": [1, 2] * 4, \"c\": [1, 2, 3, 4] * 2})\n df = df.set_index(\"c\", append=True)\n result = df.groupby(\"b\").rolling(3).mean()\n expected_index = MultiIndex.from_tuples(\n [\n (1, 0, 1),\n (1, 2, 3),\n (1, 4, 1),\n (1, 6, 3),\n (2, 1, 2),\n (2, 3, 4),\n (2, 5, 2),\n (2, 7, 4),\n ],\n names=[\"b\", None, \"c\"],\n )\n tm.assert_index_equal(result.index, expected_index)\n\n def test_groupby_rolling_object_doesnt_affect_groupby_apply(self):\n # GH 39732\n g = self.frame.groupby(\"A\")\n expected = g.apply(lambda x: x.rolling(4).sum()).index\n _ = g.rolling(window=4)\n result = g.apply(lambda x: x.rolling(4).sum()).index\n tm.assert_index_equal(result, expected)\n assert not g.mutated\n assert not g.grouper.mutated\n\n @pytest.mark.parametrize(\n (\"window\", \"min_periods\", \"closed\", \"expected\"),\n [\n (2, 0, \"left\", [None, 0.0, 1.0, 1.0, None, 0.0, 1.0, 1.0]),\n (2, 2, \"left\", [None, None, 1.0, 1.0, None, None, 1.0, 1.0]),\n (4, 4, \"left\", [None, None, None, None, None, None, None, None]),\n (4, 4, \"right\", [None, None, None, 5.0, None, None, None, 5.0]),\n ],\n )\n def test_groupby_rolling_var(self, window, min_periods, closed, expected):\n df = DataFrame([1, 2, 3, 4, 5, 6, 7, 8])\n result = (\n df.groupby([1, 2, 1, 2, 1, 2, 1, 2])\n .rolling(window=window, min_periods=min_periods, closed=closed)\n .var(0)\n )\n expected_result = DataFrame(\n np.array(expected, dtype=\"float64\"),\n index=MultiIndex(\n levels=[[1, 2], [0, 1, 2, 3, 4, 5, 6, 7]],\n codes=[[0, 0, 0, 0, 1, 1, 1, 1], [0, 2, 4, 6, 1, 3, 5, 7]],\n ),\n )\n tm.assert_frame_equal(result, 
expected_result)\n\n @pytest.mark.parametrize(\n \"columns\", [MultiIndex.from_tuples([(\"A\", \"\"), (\"B\", \"C\")]), [\"A\", \"B\"]]\n )\n def test_by_column_not_in_values(self, columns):\n # GH 32262\n df = DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns)\n g = df.groupby(\"A\")\n original_obj = g.obj.copy(deep=True)\n r = g.rolling(4)\n result = r.sum()\n assert \"A\" not in result.columns\n tm.assert_frame_equal(g.obj, original_obj)\n\n def test_groupby_level(self):\n # GH 38523, 38787\n arrays = [\n [\"Falcon\", \"Falcon\", \"Parrot\", \"Parrot\"],\n [\"Captive\", \"Wild\", \"Captive\", \"Wild\"],\n ]\n index = MultiIndex.from_arrays(arrays, names=(\"Animal\", \"Type\"))\n df = DataFrame({\"Max Speed\": [390.0, 350.0, 30.0, 20.0]}, index=index)\n result = df.groupby(level=0)[\"Max Speed\"].rolling(2).sum()\n expected = Series(\n [np.nan, 740.0, np.nan, 50.0],\n index=MultiIndex.from_tuples(\n [\n (\"Falcon\", \"Falcon\", \"Captive\"),\n (\"Falcon\", \"Falcon\", \"Wild\"),\n (\"Parrot\", \"Parrot\", \"Captive\"),\n (\"Parrot\", \"Parrot\", \"Wild\"),\n ],\n names=[\"Animal\", \"Animal\", \"Type\"],\n ),\n name=\"Max Speed\",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"by, expected_data\",\n [\n [[\"id\"], {\"num\": [100.0, 150.0, 150.0, 200.0]}],\n [\n [\"id\", \"index\"],\n {\n \"date\": [\n Timestamp(\"2018-01-01\"),\n Timestamp(\"2018-01-02\"),\n Timestamp(\"2018-01-01\"),\n Timestamp(\"2018-01-02\"),\n ],\n \"num\": [100.0, 200.0, 150.0, 250.0],\n },\n ],\n ],\n )\n def test_as_index_false(self, by, expected_data):\n # GH 39433\n data = [\n [\"A\", \"2018-01-01\", 100.0],\n [\"A\", \"2018-01-02\", 200.0],\n [\"B\", \"2018-01-01\", 150.0],\n [\"B\", \"2018-01-02\", 250.0],\n ]\n df = DataFrame(data, columns=[\"id\", \"date\", \"num\"])\n df[\"date\"] = to_datetime(df[\"date\"])\n df = df.set_index([\"date\"])\n\n gp_by = [getattr(df, attr) for attr in by]\n result = (\n df.groupby(gp_by, as_index=False).rolling(window=2, min_periods=1).mean()\n )\n\n expected = {\"id\": [\"A\", \"A\", \"B\", \"B\"]}\n expected.update(expected_data)\n expected = DataFrame(\n expected,\n index=df.index,\n )\n tm.assert_frame_equal(result, expected)\n\n\nclass TestExpanding:\n def setup_method(self):\n self.frame = DataFrame({\"A\": [1] * 20 + [2] * 12 + [3] * 8, \"B\": np.arange(40)})\n\n @pytest.mark.parametrize(\n \"f\", [\"sum\", \"mean\", \"min\", \"max\", \"count\", \"kurt\", \"skew\"]\n )\n def test_expanding(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.expanding(), f)())\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"std\", \"var\"])\n def test_expanding_ddof(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)(ddof=0)\n expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"interpolation\", [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]\n )\n def 
test_expanding_quantile(self, interpolation):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = r.quantile(0.4, interpolation=interpolation)\n expected = g.apply(\n lambda x: x.expanding().quantile(0.4, interpolation=interpolation)\n )\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"corr\", \"cov\"])\n def test_expanding_corr_cov(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.expanding(), f)(self.frame)\n\n expected = g.apply(func)\n # GH 39591: groupby.apply returns 1 instead of nan for windows\n # with all nan values\n null_idx = list(range(20, 61)) + list(range(72, 113))\n expected.iloc[null_idx, 1] = np.nan\n # GH 39591: The grouped column should be all np.nan\n # (groupby.apply inserts 0s for cov)\n expected[\"A\"] = np.nan\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.expanding(), f)(pairwise=True)\n\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_expanding_apply(self, raw):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop(\"A\", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([self.frame[\"A\"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n\nclass TestEWM:\n @pytest.mark.parametrize(\n \"method, expected_data\",\n [\n [\"mean\", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]],\n [\"std\", [np.nan, 0.707107, 0.963624, 1.177164]],\n [\"var\", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]],\n ],\n )\n def test_methods(self, method, expected_data):\n # GH 16037\n df = DataFrame({\"A\": [\"a\"] * 4, \"B\": range(4)})\n result = getattr(df.groupby(\"A\").ewm(com=1.0), method)()\n expected = DataFrame(\n {\"B\": expected_data},\n index=MultiIndex.from_tuples(\n [\n (\"a\", 0),\n (\"a\", 1),\n (\"a\", 2),\n (\"a\", 3),\n ],\n names=[\"A\", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n expected = df.groupby(\"A\").apply(lambda x: getattr(x.ewm(com=1.0), method)())\n # There may be a bug in the above statement; not returning the correct index\n tm.assert_frame_equal(result.reset_index(drop=True), expected)\n\n @pytest.mark.parametrize(\n \"method, expected_data\",\n [[\"corr\", [np.nan, 1.0, 1.0, 1]], [\"cov\", [np.nan, 0.5, 0.928571, 1.385714]]],\n )\n def test_pairwise_methods(self, method, expected_data):\n # GH 16037\n df = DataFrame({\"A\": [\"a\"] * 4, \"B\": range(4)})\n result = getattr(df.groupby(\"A\").ewm(com=1.0), method)()\n expected = DataFrame(\n {\"B\": expected_data},\n index=MultiIndex.from_tuples(\n [\n (\"a\", 0, \"B\"),\n (\"a\", 1, \"B\"),\n (\"a\", 2, \"B\"),\n (\"a\", 3, \"B\"),\n ],\n names=[\"A\", None, None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n expected = df.groupby(\"A\").apply(lambda x: getattr(x.ewm(com=1.0), method)())\n tm.assert_frame_equal(result, expected)\n\n def test_times(self, times_frame):\n # GH 40951\n halflife = \"23 days\"\n result = 
times_frame.groupby(\"A\").ewm(halflife=halflife, times=\"C\").mean()\n expected = DataFrame(\n {\n \"B\": [\n 0.0,\n 0.507534,\n 1.020088,\n 1.537661,\n 0.0,\n 0.567395,\n 1.221209,\n 0.0,\n 0.653141,\n 1.195003,\n ]\n },\n index=MultiIndex.from_tuples(\n [\n (\"a\", 0),\n (\"a\", 3),\n (\"a\", 6),\n (\"a\", 9),\n (\"b\", 1),\n (\"b\", 4),\n (\"b\", 7),\n (\"c\", 2),\n (\"c\", 5),\n (\"c\", 8),\n ],\n names=[\"A\", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_times_vs_apply(self, times_frame):\n # GH 40951\n halflife = \"23 days\"\n result = times_frame.groupby(\"A\").ewm(halflife=halflife, times=\"C\").mean()\n expected = (\n times_frame.groupby(\"A\")\n .apply(lambda x: x.ewm(halflife=halflife, times=\"C\").mean())\n .iloc[[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]]\n .reset_index(drop=True)\n )\n tm.assert_frame_equal(result.reset_index(drop=True), expected)\n\n def test_times_array(self, times_frame):\n # GH 40951\n halflife = \"23 days\"\n result = times_frame.groupby(\"A\").ewm(halflife=halflife, times=\"C\").mean()\n expected = (\n times_frame.groupby(\"A\")\n .ewm(halflife=halflife, times=times_frame[\"C\"].values)\n .mean()\n )\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "pandas.to_datetime", "numpy.array", "pandas.Index", "pandas.DataFrame", "pandas.date_range", "pandas.MultiIndex.from_tuples", "pandas.core.groupby.groupby.get_groupby", "pandas._testing.assert_frame_equal", "pandas.MultiIndex.from_arrays", "pandas.Timestamp", "numpy.arange", "pandas.MultiIndex", "pandas.Series", "pandas._testing.assert_index_equal", "pandas._testing.assert_series_equal" ] ]
shib0li/BMBO-DARN
[ "68f889ac8a1a2051754920fabff9e56f667b1cab" ]
[ "main.py" ]
[ "import fire\nimport numpy as np\nimport torch\nimport os \nimport pickle5 as pickle\nimport subprocess\nfrom datetime import datetime\nfrom time import time\n\nimport hamiltorch\n# import logging\n# import ExpConfigs as exp_configs\n# import Misc as misc\n# import data.Dataset as Dataset\n# import data.Functions as functions\n\n\nfrom utils import ExpConfigs as exp_configs\nfrom utils import Misc as misc\nfrom data import Dataset as Dataset\nfrom functionals import Functions as functions\n# from baselines import SMAC3\n# from baselines import SMAC4\n# from baselines import Hyperband\n# from baselines import BOHB\n# from baselines import SHPO\n\n# import Hamilton as hamilton\n\nfrom core import Model\nfrom core import Inference\nfrom core import BayesOpt\n\nfrom tqdm.auto import trange, tqdm\n\n\nMF_DNN_APPROACH = ['dnn_mf_bo']\nSINGLE_BASED_APPROACH = ['mf_hmc_cs', 'mf_hmc_ucs', 'mf_hmc_fix_low', 'mf_hmc_fix_high']\nPAR_HMC_BASED_APPROACH = ['par_hmc_cs', 'par_hmc_ucs']\nBATCH_HMC_BASED_APPROACH = ['ratio_batch_hmc_cs', 'ratio_batch_hmc_ucs', 'bound_ratio_batch_hmc_cs']\nAO_HMC_BASED_APPROACH = ['ao_batch_hmc_cs', 'ao_batch_hmc_ucs']\nRANDOM_APPROACH = ['full_random']\nMF_GP_BASED_APPROACH = ['mf_gp_ucb', 'mf_mes', 'par_mf_mes']\nSMAC_APPROACH = ['smac', 'gp_kernel', 'hyperband', 'bohb']\nMT_APPROACH = ['mtbo']\nGP_TS_APPROACH = ['gp_ts']\n\ndef create_path(path): \n try:\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n #\n print(\"Directory '%s' created successfully\" % (path))\n except OSError as error:\n print(\"Directory '%s' can not be created\" % (path))\n #\n\ndef parse_exp_configs(kwargs):\n\n configs = {}\n \n opt_config = exp_configs.default_optimization_config()\n opt_config._parse(kwargs)\n\n domain_config = exp_configs.default_domain_config()\n domain_config._parse(kwargs)\n \n configs['opt_config'] = opt_config\n configs['domain_config'] = domain_config\n \n all_methods = MF_DNN_APPROACH + SINGLE_BASED_APPROACH + PAR_HMC_BASED_APPROACH + BATCH_HMC_BASED_APPROACH +\\\n RANDOM_APPROACH + MF_GP_BASED_APPROACH + SMAC_APPROACH + MT_APPROACH + GP_TS_APPROACH +\\\n AO_HMC_BASED_APPROACH\n \n sampling_methods = MF_DNN_APPROACH + SINGLE_BASED_APPROACH + PAR_HMC_BASED_APPROACH + BATCH_HMC_BASED_APPROACH +\\\n RANDOM_APPROACH + AO_HMC_BASED_APPROACH\n \n if opt_config.algorithm_name not in all_methods:\n raise Exception(\"ERROR: \"+opt_config.algorithm_name+\" NOT implemented.\")\n \n \n if opt_config.algorithm_name in sampling_methods:\n hmc_sampler_config = exp_configs.default_hmc_sampler_config()\n hmc_sampler_config._parse(kwargs)\n configs['hmc_config'] = hmc_sampler_config\n #\n \n method_config = None\n mf_nn_surrogate_config = None\n if opt_config.algorithm_name in SINGLE_BASED_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_single_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n elif opt_config.algorithm_name in RANDOM_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_single_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n if opt_config.algorithm_name in MF_DNN_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_single_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n elif opt_config.algorithm_name in 
PAR_HMC_BASED_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_parallel_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n elif opt_config.algorithm_name in BATCH_HMC_BASED_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_batch_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n elif opt_config.algorithm_name in AO_HMC_BASED_APPROACH:\n #\n method_config = exp_configs.default_mf_hmc_batch_config()\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config._parse(kwargs)\n #\n# elif opt_config.algorithm_name in RAND_HMC_BASED_APPROACH:\n# #\n# method_config = exp_configs.default_mf_hmc_random_config()\n# mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n# method_config._parse(kwargs)\n# mf_nn_surrogate_config._parse(kwargs)\n# #\n elif opt_config.algorithm_name in MT_APPROACH:\n method_config = exp_configs.default_mtbo_config()\n method_config._parse(kwargs)\n mf_nn_surrogate_config = exp_configs.default_mf_nn_surrogate_config()\n #\n \n configs['method_config'] = method_config\n configs['mf_nn_surrogate_config'] = mf_nn_surrogate_config\n \n return configs\n\ndef experiment_mf_dnn(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n layers = misc.seq_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with DNN-MFBO \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.SeqAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=False)\n \n t_fit = time()\n\n argm, argx = hamil_opt.info_gain_step(hmc_samples)\n \n t_acq = time()\n \n np_argm = argm.data.cpu().numpy()\n np_argx = argx.data.cpu().numpy()\n\n #yq, y_ground, success, t_query_m, t_query_h = dataset.add(np_argx, np_argm, scaled_input=True)\n yq, y_ground, success, config, t_query_m, t_query_h = dataset.add_interpret(np_argx, np_argm, scaled_input=True)\n \n t_query = time()\n \n if success:\n res['hist_argm'].append(np_argm)\n res['hist_argx'].append(np_argx)\n res['hist_yq'].append(yq)\n res['hist_y_ground'].append(y_ground)\n res['hist_config'].append(config)\n 
res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n res['hist_t_query_m'].append(t_query_m)\n res['hist_t_query_h'].append(t_query_h) \n\n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n \n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query_m = '+str(t_query_m)+' secs\\n')\n logger.write(' - t_query_h = '+str(t_query_h)+' secs\\n')\n \n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n else:\n logger.write('* Optimization step'+str(t+1)+' FAILED at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.flush()\n \n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n\n #\n \n logger.close()\n\n\ndef experiment_single_hmc(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n layers = misc.full_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with Single-Constrained \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.FullAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=method_config.constraint)\n \n t_fit = time()\n \n if method_config.fixed_fidelity is not None:\n argm, argx = hamil_opt.fixed_fidelity_step(hmc_samples, method_config.fixed_fidelity)\n np_argm = argm\n else:\n argm, argx = hamil_opt.info_gain_step(hmc_samples)\n np_argm = argm.data.cpu().numpy()\n #\n np_argx = argx.data.cpu().numpy()\n \n t_acq = time()\n \n #yq, y_ground, success = dataset.add(np_argx, np_argm, scaled_input=True)\n yq, y_ground, success, config, t_query_m, t_query_h = dataset.add_interpret(np_argx, np_argm, scaled_input=True)\n \n t_query = time()\n \n if success:\n res['hist_argm'].append(np_argm)\n res['hist_argx'].append(np_argx)\n res['hist_yq'].append(yq)\n res['hist_y_ground'].append(y_ground)\n res['hist_config'].append(config)\n 
res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n res['hist_t_query_m'].append(t_query_m)\n res['hist_t_query_h'].append(t_query_h) \n \n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n \n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query_m = '+str(t_query_m)+' secs\\n')\n logger.write(' - t_query_h = '+str(t_query_h)+' secs\\n')\n \n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n else:\n logger.write('* Optimization step'+str(t+1)+' FAILED at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.flush()\n \n \n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n \n logger.close()\n \n \n \ndef experiment_random(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n #print('Random searching heuristics')\n layers = misc.full_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with Full-Random \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.FullAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=method_config.constraint)\n \n t_fit = time()\n \n np_argm = np.random.randint(0, model.M)\n np_argx = misc.generate_random_inputs(1, dataset.in_dim, dataset.lb, dataset.ub, seed=np.random.randint(0,100000))\n \n t_acq = time()\n \n #yq, y_ground, success, t_query_m, t_query_h = dataset.add(np_argx, np_argm, scaled_input=False)\n yq, y_ground, success, config, t_query_m, t_query_h = dataset.add_interpret(np_argx, np_argm, scaled_input=False)\n \n t_query = time()\n \n if success:\n res['hist_argm'].append(np_argm)\n res['hist_argx'].append(np_argx)\n res['hist_yq'].append(yq)\n res['hist_y_ground'].append(y_ground)\n res['hist_config'].append(config)\n res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n 
res['hist_t_query_m'].append(t_query_m)\n res['hist_t_query_h'].append(t_query_h) \n\n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n \n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query_m = '+str(t_query_m)+' secs\\n')\n logger.write(' - t_query_h = '+str(t_query_h)+' secs\\n')\n \n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n else:\n logger.write('* Optimization step'+str(t+1)+' FAILED at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.flush()\n \n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n logger.close()\n \n \ndef experiment_par_hmc(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n layers = misc.full_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with Parallel HMC \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.FullAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=method_config.constraint)\n \n t_fit = time()\n \n pool_argm, pool_argx = hamil_opt.pseudo_par_step(hmc_samples, method_config.n_threads)\n \n t_acq = time()\n \n np_pool_argx = []\n np_pool_argm = []\n for argm, argx in zip(pool_argm, pool_argx):\n np_pool_argx.append(argx.data.cpu().numpy())\n np_pool_argm.append(argm.data.cpu().numpy())\n #\n #pool_yq, pool_y_ground, pool_success = dataset.add_pool(np_pool_argx, np_pool_argm, scaled_input=True)\n pool_yq, pool_y_ground, pool_success, pool_config, t_query_m_pool, t_query_h_pool =\\\n dataset.add_pool_interpret(np_pool_argx, np_pool_argm, scaled_input=True)\n \n #print(pool_config)\n \n t_query = time()\n \n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n \n pool_size = len(pool_success)\n \n res['hist_t_fit'].append(t_fit-t_trial_start)\n 
res['hist_t_acq'].append(t_acq-t_fit)\n for i in range(pool_size):\n success = pool_success[i]\n if success:\n res['hist_argm'].append(np_pool_argm[i])\n res['hist_argx'].append(np_pool_argx[i])\n res['hist_yq'].append(pool_yq[i])\n res['hist_y_ground'].append(pool_y_ground[i])\n res['hist_config'].append(pool_config[i])\n res['hist_t_query_m'].append(t_query_m_pool[i])\n res['hist_t_query_h'].append(t_query_h_pool[i])\n logger.write(' - Success add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Success add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n logger.write(' * t_query_m = '+str(t_query_m_pool[i])+' secs\\n')\n logger.write(' * t_query_h = '+str(t_query_h_pool[i])+' secs\\n')\n else:\n logger.write(' - Failed add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Failed add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n #\n #\n\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n\n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n logger.close()\n \n \ndef experiment_batch_hmc(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n layers = misc.full_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with Ratio Batch \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n\n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.FullAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=method_config.constraint)\n \n t_fit = time()\n \n if method_config.batch_mode == 'ratio':\n pool_argm, pool_argx, _ = hamil_opt.ratio_batch_step(hmc_samples, method_config.batch_size)\n# elif method_config.batch_mode == 'bound_ratio':\n# #print('***** New bounded query *****')\n# pool_argm, pool_argx, _ = hamil_opt.bound_ratio_batch_step(hmc_samples, method_config.batch_size)\n elif method_config.batch_mode == 'linear':\n pool_argm, pool_argx, _ = hamil_opt.linear_batch_step(hmc_samples, method_config.batch_size, method_config.beta)\n #\n \n t_acq = time()\n\n np_pool_argx = []\n np_pool_argm = []\n for argm, argx in zip(pool_argm, 
pool_argx):\n np_pool_argx.append(argx.data.cpu().numpy())\n np_pool_argm.append(argm.data.cpu().numpy())\n #\n #pool_yq, pool_y_ground, pool_success = dataset.add_pool(np_pool_argx, np_pool_argm, scaled_input=True)\n pool_yq, pool_y_ground, pool_success, pool_config, t_query_m_pool, t_query_h_pool =\\\n dataset.add_pool_interpret(np_pool_argx, np_pool_argm, scaled_input=True)\n \n t_query = time()\n \n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n \n pool_size = len(pool_success)\n \n res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n \n for i in range(pool_size):\n success = pool_success[i]\n if success:\n res['hist_argm'].append(np_pool_argm[i])\n res['hist_argx'].append(np_pool_argx[i])\n res['hist_yq'].append(pool_yq[i])\n res['hist_y_ground'].append(pool_y_ground[i])\n res['hist_config'].append(pool_config[i])\n res['hist_t_query_m'].append(t_query_m_pool[i])\n res['hist_t_query_h'].append(t_query_h_pool[i])\n logger.write(' - Success add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Success add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n logger.write(' * t_query_m = '+str(t_query_m_pool[i])+' secs\\n')\n logger.write(' * t_query_h = '+str(t_query_h_pool[i])+' secs\\n')\n else:\n logger.write(' - Failed add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Failed add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n #\n #\n\n\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n\n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n logger.close()\n \ndef experiment_ao_hmc(dataset, method_config, mf_nn_surrogate_config, hmc_sampler_config, horizon, res_path, trial_id):\n \n layers = misc.full_auto_regressive_layers(\n dataset.in_dim, dataset.out_dim, \n mf_nn_surrogate_config.hidden_depths,\n mf_nn_surrogate_config.hidden_widths,\n )\n sampling = {\n 'step_size':hmc_sampler_config.step_size, \n 'L':hmc_sampler_config.L, \n 'burn':hmc_sampler_config.burn, \n 'Ns':hmc_sampler_config.Ns\n }\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with AO Batch \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n \n exp_t_start = time()\n \n \n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n model = Model.FullAutoRegressive(layers, mf_nn_surrogate_config.activation, \n torch.device(mf_nn_surrogate_config.surrogate_placement))\n hamil_opt = BayesOpt.HamilBayesOpt(model, dataset, sampling)\n hmc_samples = hamil_opt.fit(constraint=method_config.constraint)\n \n t_fit 
= time()\n\n pool_argm, pool_argx, _ = hamil_opt.ao_ratio_batch_step(hmc_samples, method_config.batch_size, alters=5)\n\n t_acq = time()\n\n np_pool_argx = []\n np_pool_argm = []\n for argm, argx in zip(pool_argm, pool_argx):\n np_pool_argx.append(argx.data.cpu().numpy())\n np_pool_argm.append(argm.data.cpu().numpy())\n #\n #pool_yq, pool_y_ground, pool_success = dataset.add_pool(np_pool_argx, np_pool_argm, scaled_input=True)\n pool_yq, pool_y_ground, pool_success, pool_config, t_query_m_pool, t_query_h_pool =\\\n dataset.add_pool_interpret(np_pool_argx, np_pool_argm, scaled_input=True)\n \n t_query = time()\n \n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n \n pool_size = len(pool_success)\n \n res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n \n for i in range(pool_size):\n success = pool_success[i]\n if success:\n res['hist_argm'].append(np_pool_argm[i])\n res['hist_argx'].append(np_pool_argx[i])\n res['hist_yq'].append(pool_yq[i])\n res['hist_y_ground'].append(pool_y_ground[i])\n res['hist_config'].append(pool_config[i])\n res['hist_t_query_m'].append(t_query_m_pool[i])\n res['hist_t_query_h'].append(t_query_h_pool[i])\n logger.write(' - Success add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Success add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n logger.write(' * t_query_m = '+str(t_query_m_pool[i])+' secs\\n')\n logger.write(' * t_query_h = '+str(t_query_h_pool[i])+' secs\\n')\n else:\n logger.write(' - Failed add argm = '+np.array2string(np.array(np_pool_argm[i]))+'\\n')\n logger.write(' - Failed add argx = '+np.array2string(np.array(np_pool_argx[i]))+'\\n')\n #\n #\n\n\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n\n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n logger.close()\n \n \ndef experiment_mf_mes(domain_name, horizon, num_trials, num_inits, init_i_trial, penalty):\n \n Ninit=\"\".join([str(e)+',' for e in num_inits])[:-1]\n costs = \"\".join([str(e)+',' for e in penalty])[:-1]\n \n os.chdir('baselines/MF-MES/experiments')\n \n subprocess.run([\"python\", \"customized_bo_runner.py\", \"-m\", \"MFMES_RFM\", \"-d\", domain_name, \"-t\", str(horizon),\n \"-c\", \"2000000\", \"-i\", Ninit, \"-s\", costs, \"-T\", str(num_trials), \"-f\", str(init_i_trial)])\n os.chdir('../../')\n \ndef experiment_par_mf_mes(domain_name, horizon, num_trials, num_inits, init_i_trial, penalty):\n \n Ninit=\"\".join([str(e)+',' for e in num_inits])[:-1]\n costs = \"\".join([str(e)+',' for e in penalty])[:-1]\n \n os.chdir('baselines/MF-MES/experiments')\n \n subprocess.run([\"python\", \"customized_parallel_bayesopt_exp.py\", \"-m\", \"Parallel_MFMES_RFM\", \"-d\", domain_name, \"-t\", str(horizon),\n \"-c\", \"2000000\", \"-i\", Ninit, \"-s\", costs, \"-T\", str(num_trials), \"-f\", str(init_i_trial)])\n os.chdir('../../')\n \ndef experiment_mf_gp_ucb(domain_name, horizon, num_trials, num_inits, init_i_trial, penalty):\n \n Ninit=\"\".join([str(e)+',' for e in num_inits])[:-1]\n costs = \"\".join([str(e)+',' for e in penalty])[:-1]\n \n os.chdir('baselines/MF-MES/experiments')\n \n subprocess.run([\"python\", \"customized_bo_runner.py\", 
\"-m\", \"BOCA\", \"-d\", domain_name, \"-t\", str(horizon), \n \"-f\", str(init_i_trial),\n \"-c\", \"2000000\", \"-i\", Ninit, \"-s\", costs, \"-T\", str(num_trials), \"-f\", str(init_i_trial)])\n os.chdir('../../')\n \ndef experiment_smac(domain_name, horizon, num_trials, init_i_trial, penalty, placement):\n for t in range(num_trials):\n trial = t + init_i_trial\n \n res_path = os.path.join('results', domain_name, 'smac', 'trial'+str(trial))\n try:\n if not os.path.exists(res_path):\n os.makedirs(res_path, exist_ok=True)\n #\n print(\"Directory '%s' created successfully\" % (res_path))\n except OSError as error:\n print(\"Directory '%s' can not be created\" % (res_path))\n #\n\n client = SMAC3.Client(domain_name, penalty[-1], horizon, res_path, torch.device(placement))\n client.minimize()\n \ndef experiment_gp_kernel(domain_name, horizon, num_trials, init_i_trial, penalty, placement):\n for t in range(num_trials):\n trial = t + init_i_trial\n \n res_path = os.path.join('results', domain_name, 'gp_kernel', 'trial'+str(trial))\n try:\n if not os.path.exists(res_path):\n os.makedirs(res_path, exist_ok=True)\n #\n print(\"Directory '%s' created successfully\" % (res_path))\n except OSError as error:\n print(\"Directory '%s' can not be created\" % (res_path))\n #\n\n client = SMAC4.Client(domain_name, penalty[-1], horizon, res_path, torch.device(placement))\n client.minimize()\n\ndef experiment_hyperband(domain_name, horizon, num_trials, init_i_trial, penalty, placement):\n for t in range(num_trials):\n trial = t + init_i_trial\n \n res_path = os.path.join('results', domain_name, 'hyperband', 'trial'+str(trial))\n try:\n if not os.path.exists(res_path):\n os.makedirs(res_path, exist_ok=True)\n #\n print(\"Directory '%s' created successfully\" % (res_path))\n except OSError as error:\n print(\"Directory '%s' can not be created\" % (res_path))\n #\n\n client = Hyperband.Client(domain_name, penalty[-1], horizon, res_path, torch.device(placement))\n client.minimize()\n\n \ndef experiment_bohb(domain_name, horizon, num_trials, init_i_trial, penalty, placement):\n for t in range(num_trials):\n trial = t + init_i_trial\n \n res_path = os.path.join('results', domain_name, 'bohb', 'trial'+str(trial))\n try:\n if not os.path.exists(res_path):\n os.makedirs(res_path, exist_ok=True)\n #\n print(\"Directory '%s' created successfully\" % (res_path))\n except OSError as error:\n print(\"Directory '%s' can not be created\" % (res_path))\n #\n\n client = BOHB.Client(domain_name, penalty[-1], horizon, res_path, torch.device(placement))\n client.minimize()\n \n \ndef experiment_multitask_bo(dataset, method_config, horizon, res_path, trial_id):\n \n res = {}\n res['hist_argm'] = []\n res['hist_argx'] = []\n res['hist_yq'] = []\n res['hist_y_ground'] = []\n res['hist_config'] = []\n res['hist_t_fit'] = []\n res['hist_t_acq'] = []\n res['hist_t_query_m'] = []\n res['hist_t_query_h'] = []\n \n pickle_name = os.path.join(res_path, 'trial'+str(trial_id)+'.pickle')\n \n log_file_name = os.path.join(res_path, 'logs_trial'+str(trial_id)+'.txt')\n logger = open(log_file_name, 'w+') \n logger.write('===============================================\\n')\n logger.write(' Experiment with Multitask BO \\n')\n logger.write('===============================================\\n')\n logger.write('Experiment start at: '+datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.flush()\n\n base_dim = method_config.base_dim\n base_hidden_depth = method_config.base_hidden_depth\n base_hidden_width = 
method_config.base_hidden_width\n surrogate_device = torch.device(method_config.surrogate_placement)\n \n exp_t_start = time()\n\n for t in trange(horizon, desc='experiment', leave=True):\n \n t_trial_start = time()\n \n hpo = SHPO.HPO(dataset, base_dim, base_hidden_depth, base_hidden_width, surrogate_device)\n \n t_fit = time()\n \n np_argm, np_argx = hpo.step(dataset.penalty)\n \n t_acq = time()\n\n #yq, y_ground, success = dataset.add(np_argx, np_argm, scaled_input=True)\n yq, y_ground, success, config, t_query_m, t_query_h = dataset.add_interpret(np_argx, np_argm, scaled_input=False)\n \n t_query = time()\n \n if success:\n res['hist_argm'].append(np_argm)\n res['hist_argx'].append(np_argx)\n res['hist_yq'].append(yq)\n res['hist_y_ground'].append(y_ground)\n res['hist_config'].append(config)\n res['hist_t_fit'].append(t_fit-t_trial_start)\n res['hist_t_acq'].append(t_acq-t_fit)\n res['hist_t_query_m'].append(t_query_m)\n res['hist_t_query_h'].append(t_query_h) \n\n logger.write('* Optimization step'+str(t+1)+' finished at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.write(' - t_fit_surrogate = '+str(t_fit-t_trial_start)+' secs\\n')\n logger.write(' - t_acq = '+str(t_acq-t_fit)+' secs\\n')\n \n logger.write(' - t_query_m = '+str(t_query_m)+' secs\\n')\n logger.write(' - t_query_h = '+str(t_query_h)+' secs\\n')\n \n logger.write(' - t_query = '+str(t_query-t_acq)+' secs\\n')\n logger.write(' - total_elapsed = '+str(t_query-exp_t_start)+' secs\\n')\n logger.flush()\n else:\n logger.write('* Optimization step'+str(t+1)+' FAILED at ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")+'\\n')\n logger.write(' - argm = '+str(np_argm)+'\\n')\n logger.write(' - argx = '+np.array2string(np_argx)+'\\n')\n logger.flush()\n\n with open(pickle_name, 'wb') as handle:\n pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)\n #\n #\n \n logger.close()\n \ndef experiment_gp_ts(domain_name):\n \n os.chdir('baselines/gp-parallel-ts')\n# print('run_gp_tes')\n \n runner_name = 'run_gpts_'+domain_name+'.py'\n \n# print(runner_name)\n \n# if domain_name == 'Diabetes':\n subprocess.run([\"python\", runner_name])\n #\n os.chdir('../../')\n \n\ndef evaluation(**kwargs):\n configs = parse_exp_configs(kwargs)\n \n domain_name = configs['domain_config'].domain_name\n domain_penalty = configs['domain_config'].penalty\n domain_placement = configs['domain_config'].domain_placement\n domain_Ninits = configs['domain_config'].num_inits\n \n preload = os.path.join('data','preload',domain_name+'.pickle')\n\n mf_func = functions.MfFunc(domain_name, domain_penalty, torch.device(domain_placement))\n \n #dataset = Dataset.MfData(mf_func, preload)\n\n if configs['opt_config'].algorithm_name in MF_DNN_APPROACH:\n \n init_i_trial = configs['opt_config'].init_i_trial\n\n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_mf_dnn(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n\n elif configs['opt_config'].algorithm_name in SINGLE_BASED_APPROACH:\n\n\n init_i_trial = configs['opt_config'].init_i_trial\n \n for t in range(configs['opt_config'].num_trials):\n dataset = 
Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_single_hmc(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n \n elif configs['opt_config'].algorithm_name in RANDOM_APPROACH:\n\n\n init_i_trial = configs['opt_config'].init_i_trial\n \n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_random(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n \n elif configs['opt_config'].algorithm_name in PAR_HMC_BASED_APPROACH:\n\n init_i_trial = configs['opt_config'].init_i_trial\n \n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_par_hmc(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n \n elif configs['opt_config'].algorithm_name in BATCH_HMC_BASED_APPROACH:\n\n\n init_i_trial = configs['opt_config'].init_i_trial\n \n# if configs['method_config'].batch_mode=='linear':\n# beta = configs['method_config'].beta\n# res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name+'_'+str(beta), 'init'+str(init_i_trial))\n# elif configs['method_config'].batch_mode=='ratio':\n# res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'init'+str(init_i_trial))\n# else:\n# raise Exception('Error: invalid batch mode')\n# #\n \n# try:\n# if not os.path.exists(res_path):\n# os.makedirs(res_path, exist_ok=True)\n# #\n# print(\"Directory '%s' created successfully\" % (res_path))\n# except OSError as error:\n# print(\"Directory '%s' can not be created\" % (res_path))\n# #\n \n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_batch_hmc(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n \n elif configs['opt_config'].algorithm_name in AO_HMC_BASED_APPROACH:\n\n\n init_i_trial = configs['opt_config'].init_i_trial\n\n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_ao_hmc(\n dataset, \n configs['method_config'],\n configs['mf_nn_surrogate_config'], \n configs['hmc_config'],\n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n \n elif configs['opt_config'].algorithm_name == 'mf_gp_ucb':\n experiment_mf_gp_ucb(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n domain_Ninits, \n 
configs['opt_config'].init_i_trial,\n domain_penalty\n )\n elif configs['opt_config'].algorithm_name == 'mf_mes':\n experiment_mf_mes(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n domain_Ninits, \n configs['opt_config'].init_i_trial,\n domain_penalty\n )\n elif configs['opt_config'].algorithm_name == 'par_mf_mes':\n #print('********** New implemnted par mfmes **********')\n experiment_par_mf_mes(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n domain_Ninits, \n configs['opt_config'].init_i_trial,\n domain_penalty\n )\n elif configs['opt_config'].algorithm_name == 'smac':\n experiment_smac(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n configs['opt_config'].init_i_trial, \n domain_penalty,\n domain_placement\n )\n elif configs['opt_config'].algorithm_name == 'gp_kernel':\n experiment_gp_kernel(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n configs['opt_config'].init_i_trial, \n domain_penalty,\n domain_placement\n )\n elif configs['opt_config'].algorithm_name == 'hyperband':\n experiment_hyperband(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n configs['opt_config'].init_i_trial, \n domain_penalty,\n domain_placement\n )\n elif configs['opt_config'].algorithm_name == 'bohb':\n experiment_bohb(\n domain_name, \n configs['opt_config'].horizon, \n configs['opt_config'].num_trials, \n configs['opt_config'].init_i_trial, \n domain_penalty,\n domain_placement\n )\n elif configs['opt_config'].algorithm_name in MT_APPROACH:\n\n\n init_i_trial = configs['opt_config'].init_i_trial\n \n for t in range(configs['opt_config'].num_trials):\n dataset = Dataset.MfData(mf_func, preload)\n tid = t + init_i_trial\n res_path = os.path.join('results', domain_name, configs['opt_config'].algorithm_name, 'trial'+str(tid))\n create_path(res_path)\n experiment_multitask_bo(\n dataset, \n configs['method_config'], \n configs['opt_config'].horizon,\n res_path,\n tid,\n )\n #\n elif configs['opt_config'].algorithm_name == 'gp_ts':\n experiment_gp_ts(\n domain_name\n )\n \n \n \n\nif __name__=='__main__':\n fire.Fire(evaluation)" ]
[ [ "torch.device", "numpy.array", "numpy.random.randint", "numpy.array2string" ] ]
czbiohub/scRFE
[ "716b0f59b4b949e6842af3080276c7ea835618a9" ]
[ "scripts/practiceScripts/scRFE 9.56.52 PM.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # scRFE\n\n# In[ ]:\n\n\n\n\n\n# In[84]:\n\n\n# MENTION ONE VS ALL CLASSIFICATION in description\n\n\n# In[117]:\n\n\n# Imports \nimport numpy as np\nimport pandas as pd\nimport scanpy as sc\nimport random\nfrom anndata import read_h5ad\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.feature_selection import RFE\nfrom sklearn.feature_selection import RFECV\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[118]:\n\n\n# read in anndata file \nadata = read_h5ad('/Users/madelinepark/Downloads/Kidney_facs.h5ad')\n\n\n# In[119]:\n\n\ndef filterNormalize (dataMatrix, classOfInterest):\n np.random.seed(644685)\n sc.logging.print_versions()\n sc.settings.verbosity = 3 \n sc.logging.print_versions()\n tiss = dataMatrix\n tiss.obs['n_counts'] = tiss.X.sum(axis=1).A1\n sc.pp.filter_cells(tiss, min_genes=250)\n sc.pp.filter_genes(tiss, min_cells=3)\n tiss = tiss[tiss.obs['n_counts'] > 1500, :]\n sc.pp.normalize_per_cell(tiss, counts_per_cell_after=1e5)\n sc.pp.log1p(tiss)\n tiss.raw = tiss\n tiss = tiss[tiss.obs[classOfInterest]!='nan']\n return tiss\n\n\n# In[120]:\n\n\n# goal: get labels on a per class basis that will go into randomForest function for y\ndef getLabels (dataMatrix, classOfInterest): \n \"\"\"\n Gets labels on a per class basis that will inputted to the randomForest function\n \n Parameters\n ----------\n dataMatrix : anndata object\n The data file of interest\n classOfInterest : str\n The class you will split the data by in the set of dataMatrix.obs\n \n Returns\n -------\n labelsDict : dict\n Dictionary with labels for each class \n \"\"\"\n dataMatrix = filterNormalize (dataMatrix, classOfInterest)\n labelsDict = {}\n for label in np.unique(dataMatrix.obs[classOfInterest]):\n lists = [] \n for obs in dataMatrix.obs[classOfInterest]:\n if obs == label: \n lists.append('A')\n else:\n lists.append('B')\n labelsDict[label] = lists #this is usually in line w if and else \n return labelsDict\n\n\n# In[121]:\n\n\ndef makeOneForest (dataMatrix, classOfInterest, labelOfInterest, nEstimators = 5000, \n randomState = 0, nJobs = -1, oobScore = True, Step = 0.2, Cv = 5): \n \"\"\"\n Builds and runs a random forest for one label in a class of interest\n \n Parameters\n ----------\n dataMatrix : anndata object\n The data file of interest\n classOfInterest : str\n The class you will split the data by in the set of dataMatrix.obs\n labelOfInterest : str\n The specific label within the class that the random forezt will run a \n \"one vs all\" classification on\n nEstimators : int\n The number of trees in the forest\n randomState : int\n Controls random number being used\n nJobs : int\n The number of jobs to run in parallel\n oobScore : bool\n Whether to use out-of-bag samples to estimate the generalization accuracy\n Step : float\n Corresponds to percentage of features to remove at each iteration\n Cv : int\n Determines the cross-validation splitting strategy\n \n Returns\n -------\n feature_selected : list\n list of top features from random forest\n selector.estimator_.feature_importances_ : list\n list of top ginis corresponding to to features\n \n \"\"\"\n dataMatrix = filterNormalize (dataMatrix, classOfInterest)\n\n print('makeOneForest' + labelOfInterest)\n labelsDict = getLabels(dataMatrix, classOfInterest) \n\n feat_labels = dataMatrix.var_names #this is 
equivalent of the genes\n X = dataMatrix.X\n y = labelsDict[labelOfInterest]\n print('Y')\n print(len(y))\n clf = RandomForestClassifier(n_estimators = nEstimators, random_state = randomState, \n n_jobs = nJobs, oob_score = oobScore)\n selector = RFECV(clf, step = Step, cv = Cv)\n \n print('training...')\n clf.fit(X, y)\n selector.fit(X, y)\n feature_selected = feat_labels[selector.support_] \n\n return feature_selected, selector.estimator_.feature_importances_ \n\n\n# In[122]:\n\n\ndef resultWrite (classOfInterest, results_df, labelOfInterest,\n feature_selected, feature_importance):\n print ('result writing')\n print(results_df)\n \n column_headings = [] \n column_headings.append(labelOfInterest)\n column_headings.append(labelOfInterest + '_gini')\n resaux = pd.DataFrame(columns = column_headings)\n resaux[labelOfInterest] = feature_selected\n resaux[labelOfInterest + '_gini'] = feature_importance\n resaux = resaux.sort_values(by = [labelOfInterest + '_gini'], ascending = False)\n resaux.reset_index(drop = True, inplace = True)\n\n results_df = pd.concat([results_df, resaux], axis=1)\n return results_df \n\n\n# In[123]:\n\n\ndef scRFE(dataMatrix, classOfInterest, nEstimators = 5000, randomState = 0, \n nJobs = -1, oobScore = True, Step = 0.2, Cv = 5):\n \"\"\"\n Builds and runs a random forest with one vs all classification for each label \n for one class of interest\n \n Parameters\n ----------\n dataMatrix : anndata object\n The data file of interest\n classOfInterest : str\n The class you will split the data by in the set of dataMatrix.obs\n labelOfInterest : str\n The specific label within the class that the random forezt will run a \n \"one vs all\" classification on\n nEstimators : int\n The number of trees in the forest\n randomState : int\n Controls random number being used\n nJobs : int\n The number of jobs to run in parallel\n oobScore : bool\n Whether to use out-of-bag samples to estimate the generalization accuracy\n Step : float\n Corresponds to percentage of features to remove at each iteration\n Cv : int\n Determines the cross-validation splitting strategy\n \n Returns\n -------\n results_df : pd.DataFrame\n Dataframe with results for each label in the class, formatted as \n \"label\" for one column, then \"label + gini\" for the corresponding column\n \n \"\"\"\n \n dataMatrix = filterNormalize (dataMatrix, classOfInterest)\n results_df = pd.DataFrame()\n for labelOfInterest in np.unique(dataMatrix.obs[classOfInterest]): #for timeliness \n print( 'scRFE' + labelOfInterest)\n \n feature_selected, feature_importance = makeOneForest(dataMatrix, \n classOfInterest, \n labelOfInterest = labelOfInterest)\n \n results_df = resultWrite (classOfInterest, results_df, \n labelOfInterest = labelOfInterest, \n feature_selected = feature_selected, \n feature_importance = feature_importance)\n print(results_df.shape)\n return results_df\n\n" ]
[ [ "sklearn.feature_selection.RFECV", "numpy.random.seed", "pandas.DataFrame", "sklearn.ensemble.RandomForestClassifier", "pandas.concat", "numpy.unique" ] ]
apatlpo/dask
[ "1fd5f14af228d3f9b90b9f96213a55a014178a05" ]
[ "dask/dataframe/io/tests/test_demo.py" ]
[ "import pandas.util.testing as tm\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\n\ndef test_make_timeseries():\n df = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='2D', partition_freq='6M')\n\n assert df.divisions[0] == pd.Timestamp('2000-01-31', freq='6M')\n assert df.divisions[-1] == pd.Timestamp('2014-07-31', freq='6M')\n tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C']))\n assert df['A'].head().dtype == float\n assert df['B'].head().dtype == int\n assert df['C'].head().dtype == object\n assert df.divisions == tuple(pd.DatetimeIndex(start='2000', end='2015',\n freq='6M'))\n\n tm.assert_frame_equal(df.head(), df.head())\n\n a = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='2D', partition_freq='6M', seed=123)\n b = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='2D', partition_freq='6M', seed=123)\n c = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='2D', partition_freq='6M', seed=456)\n d = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='2D', partition_freq='3M', seed=123)\n e = dd.demo.make_timeseries('2000', '2015', {'A': float, 'B': int, 'C': str},\n freq='1D', partition_freq='6M', seed=123)\n tm.assert_frame_equal(a.head(), b.head())\n assert not (a.head(10) == c.head(10)).all().all()\n assert a._name == b._name\n assert a._name != c._name\n assert a._name != d._name\n assert a._name != e._name\n\n\ndef test_make_timeseries_no_args():\n df = dd.demo.make_timeseries()\n assert 1 < df.npartitions < 1000\n assert len(df.columns) > 1\n assert len(set(df.dtypes)) > 1\n\n\ndef test_no_overlaps():\n df = dd.demo.make_timeseries('2000', '2001', {'A': float},\n freq='3H', partition_freq='3M')\n\n assert all(df.get_partition(i).index.max().compute() <\n df.get_partition(i + 1).index.min().compute()\n for i in range(df.npartitions - 2))\n\n\[email protected]\[email protected]\ndef test_daily_stock():\n pytest.importorskip('pandas_datareader')\n df = dd.demo.daily_stock('GOOG', start='2010-01-01', stop='2010-01-30', freq='1h')\n assert isinstance(df, dd.DataFrame)\n assert 10 < df.npartitions < 31\n assert_eq(df, df)\n" ]
[ [ "pandas.Timestamp", "pandas.Index", "pandas.DatetimeIndex" ] ]
jin-s13/mmaction2
[ "ffa678c675744fea99246446c63177d947faea83" ]
[ "tests/test_data/test_loading.py" ]
[ "import copy\nimport os.path as osp\n\nimport mmcv\nimport numpy as np\nimport pytest\nimport torch\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\n\n# yapf: disable\nfrom mmaction.datasets.pipelines import (AudioDecode, AudioDecodeInit,\n AudioFeatureSelector, DecordDecode,\n DecordInit, DenseSampleFrames,\n FrameSelector,\n GenerateLocalizationLabels,\n LoadAudioFeature, LoadHVULabel,\n LoadLocalizationFeature,\n LoadProposals, OpenCVDecode,\n OpenCVInit, PyAVDecode, PyAVInit,\n RawFrameDecode, SampleAVAFrames,\n SampleFrames, SampleProposalFrames,\n UntrimmedSampleFrames)\n\n# yapf: enable\n\n\nclass ExampleSSNInstance:\n\n def __init__(self,\n start_frame,\n end_frame,\n num_frames,\n label=None,\n best_iou=None,\n overlap_self=None):\n self.start_frame = start_frame\n self.end_frame = min(end_frame, num_frames)\n self.label = label if label is not None else -1\n self.coverage = (end_frame - start_frame) / num_frames\n self.best_iou = best_iou\n self.overlap_self = overlap_self\n\n\nclass TestLoading:\n\n @staticmethod\n def check_keys_contain(result_keys, target_keys):\n \"\"\"Check if all elements in target_keys is in result_keys.\"\"\"\n return set(target_keys).issubset(set(result_keys))\n\n @classmethod\n def setup_class(cls):\n cls.img_path = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test.jpg')\n cls.video_path = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test.mp4')\n cls.wav_path = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test.wav')\n cls.audio_spec_path = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test.npy')\n cls.img_dir = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test_imgs')\n cls.raw_feature_dir = osp.join(\n osp.dirname(osp.dirname(__file__)),\n 'data/test_activitynet_features')\n cls.bsp_feature_dir = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test_bsp_features')\n cls.proposals_dir = osp.join(\n osp.dirname(osp.dirname(__file__)), 'data/test_proposals')\n cls.total_frames = 5\n cls.filename_tmpl = 'img_{:05}.jpg'\n cls.flow_filename_tmpl = '{}_{:05d}.jpg'\n video_total_frames = len(mmcv.VideoReader(cls.video_path))\n cls.audio_total_frames = video_total_frames\n cls.video_results = dict(\n filename=cls.video_path,\n label=1,\n total_frames=video_total_frames,\n start_index=0)\n cls.audio_results = dict(\n audios=np.random.randn(1280, ),\n audio_path=cls.wav_path,\n total_frames=cls.audio_total_frames,\n label=1,\n start_index=0)\n cls.audio_feature_results = dict(\n audios=np.random.randn(128, 80),\n audio_path=cls.audio_spec_path,\n total_frames=cls.audio_total_frames,\n label=1,\n start_index=0)\n cls.frame_results = dict(\n frame_dir=cls.img_dir,\n total_frames=cls.total_frames,\n filename_tmpl=cls.filename_tmpl,\n start_index=1,\n modality='RGB',\n offset=0,\n label=1)\n cls.flow_frame_results = dict(\n frame_dir=cls.img_dir,\n total_frames=cls.total_frames,\n filename_tmpl=cls.flow_filename_tmpl,\n modality='Flow',\n offset=0,\n label=1)\n cls.action_results = dict(\n video_name='v_test1',\n data_prefix=cls.raw_feature_dir,\n temporal_scale=5,\n boundary_ratio=0.1,\n duration_second=10,\n duration_frame=10,\n feature_frame=8,\n annotations=[{\n 'segment': [3.0, 5.0],\n 'label': 'Rock climbing'\n }])\n cls.proposal_results = dict(\n frame_dir=cls.img_dir,\n video_id='test_imgs',\n total_frames=cls.total_frames,\n filename_tmpl=cls.filename_tmpl,\n start_index=1,\n out_proposals=[[[\n 'test_imgs',\n ExampleSSNInstance(1, 4, 10, 1, 1, 1)\n ], 0], [['test_imgs',\n 
ExampleSSNInstance(2, 5, 10, 2, 1, 1)], 0]])\n\n cls.ava_results = dict(\n fps=30, timestamp=902, timestamp_start=840, shot_info=(0, 27000))\n\n cls.hvu_label_example1 = dict(\n categories=['action', 'object', 'scene', 'concept'],\n category_nums=[2, 5, 3, 2],\n label=dict(action=[0], object=[2, 3], scene=[0, 1]))\n cls.hvu_label_example2 = dict(\n categories=['action', 'object', 'scene', 'concept'],\n category_nums=[2, 5, 3, 2],\n label=dict(action=[1], scene=[1, 2], concept=[1]))\n\n def test_load_hvu_label(self):\n hvu_label_example1 = copy.deepcopy(self.hvu_label_example1)\n hvu_label_example2 = copy.deepcopy(self.hvu_label_example2)\n categories = hvu_label_example1['categories']\n category_nums = hvu_label_example1['category_nums']\n num_tags = sum(category_nums)\n num_categories = len(categories)\n\n loader = LoadHVULabel()\n assert repr(loader) == (f'{loader.__class__.__name__}('\n f'hvu_initialized={False})')\n\n result1 = loader(hvu_label_example1)\n label1 = torch.zeros(num_tags)\n mask1 = torch.zeros(num_tags)\n category_mask1 = torch.zeros(num_categories)\n\n assert repr(loader) == (f'{loader.__class__.__name__}('\n f'hvu_initialized={True})')\n\n label1[[0, 4, 5, 7, 8]] = 1.\n mask1[:10] = 1.\n category_mask1[:3] = 1.\n\n assert torch.all(torch.eq(label1, result1['label']))\n assert torch.all(torch.eq(mask1, result1['mask']))\n assert torch.all(torch.eq(category_mask1, result1['category_mask']))\n\n result2 = loader(hvu_label_example2)\n label2 = torch.zeros(num_tags)\n mask2 = torch.zeros(num_tags)\n category_mask2 = torch.zeros(num_categories)\n\n label2[[1, 8, 9, 11]] = 1.\n mask2[:2] = 1.\n mask2[7:] = 1.\n category_mask2[[0, 2, 3]] = 1.\n\n assert torch.all(torch.eq(label2, result2['label']))\n assert torch.all(torch.eq(mask2, result2['mask']))\n assert torch.all(torch.eq(category_mask2, result2['category_mask']))\n\n def test_sample_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n with pytest.warns(UserWarning):\n # start_index has been deprecated\n config = dict(\n clip_len=3, frame_interval=1, num_clips=5, start_index=1)\n SampleFrames(**config)\n\n # Sample Frame with no temporal_jitter\n # clip_len=3, frame_interval=1, num_clips=5\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 15\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 15\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={3}, '\n f'frame_interval={1}, '\n f'num_clips={5}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Sample Frame with no temporal_jitter\n # clip_len=5, frame_interval=1, num_clips=5,\n # out_of_bound_opt='repeat_last'\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=5,\n frame_interval=1,\n num_clips=5,\n temporal_jitter=False,\n out_of_bound_opt='repeat_last')\n sample_frames = SampleFrames(**config)\n 
sample_frames_results = sample_frames(video_result)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={5}, '\n f'frame_interval={1}, '\n f'num_clips={5}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=repeat_last, '\n f'test_mode={False})')\n\n def check_monotonous(arr):\n length = arr.shape[0]\n for i in range(length - 1):\n if arr[i] > arr[i + 1]:\n return False\n return True\n\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 25\n frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])\n for i in range(5):\n assert check_monotonous(frame_inds[i])\n\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 25\n frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])\n for i in range(5):\n assert check_monotonous(frame_inds[i])\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with temporal_jitter\n # clip_len=4, frame_interval=2, num_clips=5\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 20\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 20\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={2}, '\n f'num_clips={5}, '\n f'temporal_jitter={True}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Sample Frame with no temporal_jitter in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 24\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 24\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n # Sample Frame with no temporal_jitter in test mode\n # clip_len=3, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=3,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n 
target_keys)\n assert len(sample_frames_results['frame_inds']) == 18\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 18\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 2, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 2, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=6, frame_interval=1, num_clips=1\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 5\n config = dict(\n clip_len=6,\n frame_interval=1,\n num_clips=1,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n [1, 2, 3, 4, 5, 1])\n\n # Sample Frame with no temporal_jitter to get avg_interval <= 0\n # clip_len=12, frame_interval=1, num_clips=20\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 30\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=20,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 240\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 240\n assert 
np.max(sample_frames_results['frame_inds']) <= 30\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert sample_frames_results['start_index'] == 0\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 3, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=12, frame_interval=1, num_clips=2\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 10\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=2,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 24\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 24\n assert np.max(sample_frames_results['frame_inds']) <= 10\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame using twice sample\n # clip_len=12, frame_interval=1, num_clips=2\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 40\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=2,\n temporal_jitter=False,\n twice_sample=True,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 48\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 48\n assert np.max(sample_frames_results['frame_inds']) <= 40\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n def test_dense_sample_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n # Dense sample with no temporal_jitter in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert 
len(dense_sample_frames_results['frame_inds']) == 240\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={64}, '\n f'num_sample_positions={10}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n # Dense sample with no temporal_jitter\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n\n # Dense sample with no temporal_jitter, sample_range=32 in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=32,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n\n # Dense sample with no temporal_jitter, sample_range=32\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=32,\n temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={32}, '\n f'num_sample_positions={10}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Dense sample with no temporal_jitter, sample_range=1000 to check mod\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=1000,\n temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert 
len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n\n # Dense sample with no temporal_jitter in test mode\n # sample_range=32, num_sample_positions=5\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n num_sample_positions=5,\n sample_range=32,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert self.check_keys_contain(dense_sample_frames_results.keys(),\n target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 120\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 120\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={32}, '\n f'num_sample_positions={5}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n def test_untrim_sample_frames(self):\n\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n frame_result = dict(\n frame_dir=None,\n total_frames=100,\n filename_tmpl=None,\n modality='RGB',\n start_index=0,\n label=1)\n video_result = copy.deepcopy(self.video_results)\n\n config = dict(clip_len=1, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(frame_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([8, 24, 40, 56, 72, 88]))\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=1, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n frame_inds = np.array(list(range(8, 300, 16)))\n assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0]\n assert_array_equal(sample_frames_results['frame_inds'], frame_inds)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=1, frame_interval=16)\n sample_frames = UntrimmedSampleFrames(**config)\n frame_result_ = copy.deepcopy(frame_result)\n frame_result_['start_index'] = 1\n sample_frames_results = sample_frames(frame_result_)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([8, 24, 40, 56, 72, 88]) + 1)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=3, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(frame_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n 
target_keys)\n assert len(sample_frames_results['frame_inds']) == 18\n assert_array_equal(\n sample_frames_results['frame_inds'],\n np.array([\n 7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87,\n 88, 89\n ]))\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={3}, '\n f'frame_interval={16})')\n\n def test_sample_ava_frames(self):\n target_keys = [\n 'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds',\n 'clip_len', 'frame_interval'\n ]\n config = dict(clip_len=32, frame_interval=2)\n sample_ava_dataset = SampleAVAFrames(**config)\n ava_result = sample_ava_dataset(results=self.ava_results)\n assert self.check_keys_contain(ava_result.keys(), target_keys)\n assert ava_result['clip_len'] == 32\n assert ava_result['frame_interval'] == 2\n assert len(ava_result['frame_inds']) == 32\n assert repr(sample_ava_dataset) == (\n f'{sample_ava_dataset.__class__.__name__}('\n f'clip_len={32}, '\n f'frame_interval={2}, '\n f'test_mode={False})')\n\n # add test case in Issue #306\n config = dict(clip_len=8, frame_interval=8)\n sample_ava_dataset = SampleAVAFrames(**config)\n ava_result = sample_ava_dataset(results=self.ava_results)\n assert self.check_keys_contain(ava_result.keys(), target_keys)\n assert ava_result['clip_len'] == 8\n assert ava_result['frame_interval'] == 8\n assert len(ava_result['frame_inds']) == 8\n assert repr(sample_ava_dataset) == (\n f'{sample_ava_dataset.__class__.__name__}('\n f'clip_len={8}, '\n f'frame_interval={8}, '\n f'test_mode={False})')\n\n def test_sample_proposal_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames', 'start_index'\n ]\n\n # test error cases\n with pytest.raises(TypeError):\n proposal_result = copy.deepcopy(self.proposal_results)\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=('error', 'error'),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames(proposal_result)\n\n # test normal cases\n # Sample Frame with no temporal_jitter\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=train)')\n\n # Sample Frame with temporal_jitter\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=True)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == 
(f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={True}, '\n f'mode=train)')\n\n # Sample Frame with no temporal_jitter in val mode\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False,\n mode='val')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=val)')\n\n # Sample Frame with no temporal_jitter in test mode\n # test_interval=2\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['out_proposals'] = None\n proposal_result['total_frames'] = 10\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n test_interval=2,\n temporal_jitter=False,\n mode='test')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 5\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={2}, '\n f'temporal_jitter={False}, '\n f'mode=test)')\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 3\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=train)')\n\n # Sample Frame with no temporal_jitter to\n # get clip_offsets zero in val mode\n # clip_len=1, frame_interval=1\n # body_segments=4, aug_segments=(2, 2)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 3\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=4,\n aug_segments=(2, 2),\n aug_ratio=0.5,\n temporal_jitter=False,\n mode='val')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert self.check_keys_contain(sample_frames_results.keys(),\n target_keys)\n assert len(sample_frames_results['frame_inds']) == 
16\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={4}, '\n f'aug_segments={(2, 2)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=val)')\n\n def test_pyav_init(self):\n target_keys = ['video_reader', 'total_frames']\n video_result = copy.deepcopy(self.video_results)\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n assert self.check_keys_contain(pyav_init_result.keys(), target_keys)\n assert pyav_init_result['total_frames'] == 300\n assert repr(\n pyav_init) == f'{pyav_init.__class__.__name__}(io_backend=disk)'\n\n def test_pyav_decode(self):\n target_keys = ['frame_inds', 'imgs', 'original_shape']\n\n # test PyAV with 2 dim input and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames,\n 2)[:, np.newaxis]\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode()\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('\n f'multi_thread={False})')\n\n # test PyAV with 1 dim input and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames, 5)\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode()\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # PyAV with multi thread and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames, 5)\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode(multi_thread=True)\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('\n f'multi_thread={True})')\n\n # test PyAV with 2 dim input\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(1, self.total_frames,\n 2)[:, np.newaxis]\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode()\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test PyAV with 1 dim input\n video_result = copy.deepcopy(self.video_results)\n 
video_result['frame_inds'] = np.arange(1, self.total_frames, 5)\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode()\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # PyAV with multi thread\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(1, self.total_frames, 5)\n pyav_init = PyAVInit()\n pyav_init_result = pyav_init(video_result)\n video_result['video_reader'] = pyav_init_result['video_reader']\n\n pyav_decode = PyAVDecode(multi_thread=True)\n pyav_decode_result = pyav_decode(video_result)\n assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)\n assert pyav_decode_result['original_shape'] == (256, 340)\n assert np.shape(pyav_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n assert repr(pyav_decode) == pyav_decode.__class__.__name__ + \\\n f'(multi_thread={True})'\n\n def test_decord_init(self):\n target_keys = ['video_reader', 'total_frames']\n video_result = copy.deepcopy(self.video_results)\n decord_init = DecordInit()\n decord_init_result = decord_init(video_result)\n assert self.check_keys_contain(decord_init_result.keys(), target_keys)\n assert decord_init_result['total_frames'] == len(\n decord_init_result['video_reader'])\n assert repr(decord_init) == (f'{decord_init.__class__.__name__}('\n f'io_backend=disk, '\n f'num_threads={1})')\n\n def test_decord_decode(self):\n target_keys = ['frame_inds', 'imgs', 'original_shape']\n\n # test Decord with 2 dim input and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames,\n 3)[:, np.newaxis]\n decord_init = DecordInit()\n decord_init_result = decord_init(video_result)\n video_result['video_reader'] = decord_init_result['video_reader']\n\n decord_decode = DecordDecode()\n decord_decode_result = decord_decode(video_result)\n assert self.check_keys_contain(decord_decode_result.keys(),\n target_keys)\n assert decord_decode_result['original_shape'] == (256, 340)\n assert np.shape(decord_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test Decord with 1 dim input and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames, 3)\n decord_init = DecordInit()\n decord_init_result = decord_init(video_result)\n video_result['video_reader'] = decord_init_result['video_reader']\n\n decord_decode = DecordDecode()\n decord_decode_result = decord_decode(video_result)\n assert self.check_keys_contain(decord_decode_result.keys(),\n target_keys)\n assert decord_decode_result['original_shape'] == (256, 340)\n assert np.shape(decord_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test Decord with 2 dim input and start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames,\n 3)[:, np.newaxis]\n decord_init = DecordInit()\n decord_init_result = decord_init(video_result)\n video_result['video_reader'] = decord_init_result['video_reader']\n\n decord_decode = DecordDecode()\n decord_decode_result = decord_decode(video_result)\n assert 
self.check_keys_contain(decord_decode_result.keys(),\n target_keys)\n assert decord_decode_result['original_shape'] == (256, 340)\n assert np.shape(decord_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test Decord with 1 dim input\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(1, self.total_frames, 3)\n decord_init = DecordInit()\n decord_init_result = decord_init(video_result)\n video_result['video_reader'] = decord_init_result['video_reader']\n\n decord_decode = DecordDecode()\n decord_decode_result = decord_decode(video_result)\n assert self.check_keys_contain(decord_decode_result.keys(),\n target_keys)\n assert decord_decode_result['original_shape'] == (256, 340)\n assert np.shape(decord_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n def test_opencv_init(self):\n target_keys = ['new_path', 'video_reader', 'total_frames']\n video_result = copy.deepcopy(self.video_results)\n opencv_init = OpenCVInit()\n opencv_init_result = opencv_init(video_result)\n assert self.check_keys_contain(opencv_init_result.keys(), target_keys)\n assert opencv_init_result['total_frames'] == len(\n opencv_init_result['video_reader'])\n assert repr(opencv_init) == (f'{opencv_init.__class__.__name__}('\n f'io_backend=disk)')\n\n def test_opencv_decode(self):\n target_keys = ['frame_inds', 'imgs', 'original_shape']\n\n # test OpenCV with 2 dim input when start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames,\n 2)[:, np.newaxis]\n opencv_init = OpenCVInit()\n opencv_init_result = opencv_init(video_result)\n video_result['video_reader'] = opencv_init_result['video_reader']\n\n opencv_decode = OpenCVDecode()\n opencv_decode_result = opencv_decode(video_result)\n assert self.check_keys_contain(opencv_decode_result.keys(),\n target_keys)\n assert opencv_decode_result['original_shape'] == (256, 340)\n assert np.shape(opencv_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test OpenCV with 2 dim input\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(1, self.total_frames,\n 2)[:, np.newaxis]\n opencv_init = OpenCVInit()\n opencv_init_result = opencv_init(video_result)\n video_result['video_reader'] = opencv_init_result['video_reader']\n\n opencv_decode = OpenCVDecode()\n opencv_decode_result = opencv_decode(video_result)\n assert self.check_keys_contain(opencv_decode_result.keys(),\n target_keys)\n assert opencv_decode_result['original_shape'] == (256, 340)\n assert np.shape(opencv_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n # test OpenCV with 1 dim input when start_index = 0\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(0, self.total_frames, 3)\n opencv_init = OpenCVInit()\n opencv_init_result = opencv_init(video_result)\n video_result['video_reader'] = opencv_init_result['video_reader']\n\n # test OpenCV with 1 dim input\n video_result = copy.deepcopy(self.video_results)\n video_result['frame_inds'] = np.arange(1, self.total_frames, 3)\n opencv_init = OpenCVInit()\n opencv_init_result = opencv_init(video_result)\n video_result['video_reader'] = opencv_init_result['video_reader']\n\n opencv_decode = OpenCVDecode()\n opencv_decode_result = opencv_decode(video_result)\n assert self.check_keys_contain(opencv_decode_result.keys(),\n target_keys)\n assert 
opencv_decode_result['original_shape'] == (256, 340)\n assert np.shape(opencv_decode_result['imgs']) == (len(\n video_result['frame_inds']), 256, 340, 3)\n\n def test_rawframe_selector(self):\n\n with pytest.warns(UserWarning):\n FrameSelector(io_backend='disk')\n\n def test_rawframe_decode(self):\n target_keys = ['frame_inds', 'imgs', 'original_shape', 'modality']\n\n # test frame selector with 2 dim input\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(0, self.total_frames, 2)[:,\n np.newaxis]\n # since the test images start with index 1, we plus 1 to frame_inds\n # in order to pass the CI\n inputs['frame_inds'] = inputs['frame_inds'] + 1\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 2 dim input\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(1, self.total_frames, 2)[:,\n np.newaxis]\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input when start_index = 0\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(0, self.total_frames, 5)\n # since the test images start with index 1, we plus 1 to frame_inds\n # in order to pass the CI\n inputs['frame_inds'] = inputs['frame_inds'] + 1\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(1, self.total_frames, 5)\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(0, self.total_frames, 2)\n # since the test images start with index 1, we plus 1 to frame_inds\n # in order to pass the CI\n inputs['frame_inds'] = inputs['frame_inds'] + 1\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(1, self.total_frames, 2)\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input for flow images\n inputs = copy.deepcopy(self.flow_frame_results)\n 
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)\n # since the test images start with index 1, we plus 1 to frame_inds\n # in order to pass the CI\n inputs['frame_inds'] = inputs['frame_inds'] + 1\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,\n 240, 320)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector with 1 dim input for flow images\n inputs = copy.deepcopy(self.flow_frame_results)\n inputs['frame_inds'] = np.arange(1, self.total_frames, 2)\n frame_selector = RawFrameDecode(io_backend='disk')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,\n 240, 320)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector in turbojpeg decording backend\n # when start_index = 0\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(0, self.total_frames, 5)\n # since the test images start with index 1, we plus 1 to frame_inds\n # in order to pass the CI\n inputs['frame_inds'] = inputs['frame_inds'] + 1\n frame_selector = RawFrameDecode(\n io_backend='disk', decoding_backend='turbojpeg')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n\n # test frame selector in turbojpeg decording backend\n inputs = copy.deepcopy(self.frame_results)\n inputs['frame_inds'] = np.arange(1, self.total_frames, 5)\n frame_selector = RawFrameDecode(\n io_backend='disk', decoding_backend='turbojpeg')\n results = frame_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,\n 320, 3)\n assert results['original_shape'] == (240, 320)\n assert repr(frame_selector) == (f'{frame_selector.__class__.__name__}('\n f'io_backend=disk, '\n f'decoding_backend=turbojpeg)')\n\n def test_load_localization_feature(self):\n target_keys = ['raw_feature']\n\n action_result = copy.deepcopy(self.action_results)\n\n # test error cases\n with pytest.raises(NotImplementedError):\n load_localization_feature = LoadLocalizationFeature(\n 'unsupport_ext')\n\n # test normal cases\n load_localization_feature = LoadLocalizationFeature()\n load_localization_feature_result = load_localization_feature(\n action_result)\n assert self.check_keys_contain(load_localization_feature_result.keys(),\n target_keys)\n assert load_localization_feature_result['raw_feature'].shape == (400,\n 5)\n assert repr(load_localization_feature) == (\n f'{load_localization_feature.__class__.__name__}('\n f'raw_feature_ext=.csv)')\n\n def test_generate_localization_label(self):\n action_result = copy.deepcopy(self.action_results)\n action_result['raw_feature'] = np.random.randn(400, 5)\n\n # test default setting\n target_keys = ['gt_bbox']\n generate_localization_labels = GenerateLocalizationLabels()\n generate_localization_labels_result = generate_localization_labels(\n action_result)\n assert self.check_keys_contain(\n generate_localization_labels_result.keys(), target_keys)\n\n assert_array_almost_equal(\n generate_localization_labels_result['gt_bbox'], [[0.375, 0.625]],\n decimal=4)\n\n def test_load_proposals(self):\n target_keys = [\n 'bsp_feature', 
'tmin', 'tmax', 'tmin_score', 'tmax_score',\n 'reference_temporal_iou'\n ]\n\n action_result = copy.deepcopy(self.action_results)\n\n # test error cases\n with pytest.raises(NotImplementedError):\n load_proposals = LoadProposals(5, self.proposals_dir,\n self.bsp_feature_dir,\n 'unsupport_ext')\n\n with pytest.raises(NotImplementedError):\n load_proposals = LoadProposals(5, self.proposals_dir,\n self.bsp_feature_dir, '.csv',\n 'unsupport_ext')\n\n # test normal cases\n load_proposals = LoadProposals(5, self.proposals_dir,\n self.bsp_feature_dir)\n load_proposals_result = load_proposals(action_result)\n assert self.check_keys_contain(load_proposals_result.keys(),\n target_keys)\n assert (load_proposals_result['bsp_feature'].shape[0] == 5)\n assert load_proposals_result['tmin'].shape == (5, )\n assert_array_almost_equal(\n load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4)\n assert load_proposals_result['tmax'].shape == (5, )\n assert_array_almost_equal(\n load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4)\n assert load_proposals_result['tmin_score'].shape == (5, )\n assert_array_almost_equal(\n load_proposals_result['tmin_score'],\n np.arange(0.95, 0.90, -0.01),\n decimal=4)\n assert load_proposals_result['tmax_score'].shape == (5, )\n assert_array_almost_equal(\n load_proposals_result['tmax_score'],\n np.arange(0.96, 0.91, -0.01),\n decimal=4)\n assert load_proposals_result['reference_temporal_iou'].shape == (5, )\n assert_array_almost_equal(\n load_proposals_result['reference_temporal_iou'],\n np.arange(0.85, 0.80, -0.01),\n decimal=4)\n assert repr(load_proposals) == (\n f'{load_proposals.__class__.__name__}('\n f'top_k={5}, '\n f'pgm_proposals_dir={self.proposals_dir}, '\n f'pgm_features_dir={self.bsp_feature_dir}, '\n f'proposal_ext=.csv, '\n f'feature_ext=.npy)')\n\n def test_audio_decode_init(self):\n target_keys = ['audios', 'length', 'sample_rate']\n inputs = copy.deepcopy(self.audio_results)\n audio_decode_init = AudioDecodeInit()\n results = audio_decode_init(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n\n # test when no audio file exists\n inputs = copy.deepcopy(self.audio_results)\n inputs['audio_path'] = 'foo/foo/bar.wav'\n audio_decode_init = AudioDecodeInit()\n results = audio_decode_init(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert results['audios'].shape == (10.0 *\n audio_decode_init.sample_rate, )\n assert repr(audio_decode_init) == (\n f'{audio_decode_init.__class__.__name__}('\n f'io_backend=disk, '\n f'sample_rate=16000, '\n f'pad_method=zero)')\n\n def test_audio_decode(self):\n target_keys = ['frame_inds', 'audios']\n inputs = copy.deepcopy(self.audio_results)\n inputs['frame_inds'] = np.arange(0, self.audio_total_frames,\n 2)[:, np.newaxis]\n inputs['num_clips'] = 1\n inputs['length'] = 1280\n audio_selector = AudioDecode()\n results = audio_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n\n def test_load_audio_feature(self):\n target_keys = ['audios']\n inputs = copy.deepcopy(self.audio_feature_results)\n load_audio_feature = LoadAudioFeature()\n results = load_audio_feature(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n\n # test when no audio feature file exists\n inputs = copy.deepcopy(self.audio_feature_results)\n inputs['audio_path'] = 'foo/foo/bar.npy'\n load_audio_feature = LoadAudioFeature()\n results = load_audio_feature(inputs)\n assert results['audios'].shape == (640, 80)\n assert 
self.check_keys_contain(results.keys(), target_keys)\n assert repr(load_audio_feature) == (\n f'{load_audio_feature.__class__.__name__}('\n f'pad_method=zero)')\n\n def test_audio_feature_selector(self):\n target_keys = ['audios']\n # test frame selector with 2 dim input\n inputs = copy.deepcopy(self.audio_feature_results)\n inputs['frame_inds'] = np.arange(0, self.audio_total_frames,\n 2)[:, np.newaxis]\n inputs['num_clips'] = 1\n inputs['length'] = 1280\n audio_feature_selector = AudioFeatureSelector()\n results = audio_feature_selector(inputs)\n assert self.check_keys_contain(results.keys(), target_keys)\n assert repr(audio_feature_selector) == (\n f'{audio_feature_selector.__class__.__name__}('\n f'fix_length={128})')\n" ]
[ [ "torch.zeros", "numpy.max", "numpy.array", "torch.eq", "numpy.testing.assert_array_equal", "numpy.random.randn", "numpy.min", "numpy.shape", "numpy.testing.assert_array_almost_equal", "numpy.arange" ] ]
shantanuwadnerkar/deepdrive-zero
[ "3134a5b092a53ff60e4207d7419fd6a19cb5a6e9" ]
[ "deepdrive_zero/player.py" ]
[ "import math\nfrom math import cos, sin, pi\nimport os\nimport sys\nfrom random import random\nfrom typing import List\n\nimport numpy as np\n\nfrom loguru import logger as log\n\nimport arcade\nimport arcade.color as color\nfrom deepdrive_zero.constants import SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_MARGIN, \\\n MAP_WIDTH_PX, MAP_HEIGHT_PX, PLAYER_TURN_RADIANS_PER_KEYSTROKE, \\\n SCREEN_TITLE, \\\n CHARACTER_SCALING, MAX_PIXELS_PER_SEC_SQ, TESLA_LENGTH, VOYAGE_VAN_LENGTH, \\\n USE_VOYAGE, VEHICLE_PNG, MAX_METERS_PER_SEC_SQ, MAP_IMAGE\n# Constants\nfrom deepdrive_zero.envs.env import Deepdrive2DEnv\nfrom deepdrive_zero.map_gen import get_intersection\n\nDRAW_COLLISION_BOXES = True\nDRAW_WAYPOINT_VECTORS = False\nDRAW_INTERSECTION = True\n\n# TODO: Calculate rectangle points and confirm corners are at same location in\n# arcade.\n\n\n# noinspection PyAbstractClass\nclass Deepdrive2DPlayer(arcade.Window):\n \"\"\"Allows playing the env as a human\"\"\"\n def __init__(self, add_rotational_friction=True,\n add_longitudinal_friction=True, env=None,\n fps=60, static_obstacle=False, one_waypoint=False,\n is_intersection_map=False, env_config=None):\n\n # Call the parent class and set up the window\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE,\n update_rate=1/fps)\n self.add_rotational_friction = add_rotational_friction\n self.add_longitudinal_friction = add_longitudinal_friction\n self.fps = fps\n\n arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)\n self.player_list = None\n self.physics_engine = None\n self.human_controlled = False if env else True\n self.env: Deepdrive2DEnv = env\n self.steer = 0\n self.accel = 0\n self.brake = 0\n self.map = None\n self.angle = None\n self.background = None\n self.max_accel = None\n self.px_per_m = None\n self.static_obstacle = static_obstacle\n self.is_intersection_map = is_intersection_map\n self.one_waypoint = one_waypoint\n\n self.env_config = env_config\n\n def setup(self):\n \"\"\" Set up the game here. Call this function to restart the game. 
\"\"\"\n self.player_list = arcade.SpriteList()\n # self.background = arcade.load_texture(MAP_IMAGE)\n\n vehicle_length_pixels = arcade.Sprite(\n VEHICLE_PNG, CHARACTER_SCALING).height\n if USE_VOYAGE:\n vehicle_length_meters = VOYAGE_VAN_LENGTH\n else:\n vehicle_length_meters = TESLA_LENGTH\n self.px_per_m = vehicle_length_pixels / vehicle_length_meters\n self.max_accel = MAX_PIXELS_PER_SEC_SQ / self.px_per_m\n\n if self.env is None:\n self.env = Deepdrive2DEnv(\n px_per_m=self.px_per_m,\n add_rotational_friction=self.add_rotational_friction,\n add_longitudinal_friction=self.add_longitudinal_friction,\n return_observation_as_array=False,\n expect_normalized_actions=False,\n expect_normalized_action_deltas=False,\n decouple_step_time=True,\n physics_steps_per_observation=1,\n add_static_obstacle=self.static_obstacle,\n is_one_waypoint_map=self.one_waypoint,\n is_intersection_map=self.is_intersection_map,)\n\n self.env.configure_env(self.env_config)\n\n self.static_obstacle = (self.static_obstacle or\n (self.env and\n self.env.unwrapped.add_static_obstacle))\n self.is_intersection_map = (self.is_intersection_map or\n (self.env and\n self.env.unwrapped.is_intersection_map))\n self.one_waypoint = (self.one_waypoint or\n (self.env and\n self.env.unwrapped.is_one_waypoint_map))\n\n self.env.reset()\n\n for i, agent in enumerate(self.env.all_agents):\n sprite = arcade.Sprite(VEHICLE_PNG, CHARACTER_SCALING)\n sprite.center_x = agent.map.x_pixels[0]\n sprite.center_y = agent.map.y_pixels[0]\n self.player_list.append(sprite)\n\n\n def on_draw(self):\n arcade.start_render()\n\n for agent in self.env.all_agents:\n self.draw_agent_objects(agent)\n\n if self.is_intersection_map:\n self.draw_intersection()\n\n # arcade.draw_line(300, 300, 300 + self.player_sprite.height, 300,\n # arcade.color.WHITE)\n # arcade.draw_lines(self.map, arcade.color.ORANGE, 3)\n # arcade.draw_point(self.heading_x, self.heading_y,\n # arcade.color.WHITE, 10)\n\n self.player_list.draw() # Draw the car\n\n def draw_agent_objects(self, agent):\n a = agent\n e = self.env\n m = a.map\n ppm = e.px_per_m\n angle = a.angle\n theta = angle + pi / 2\n if self.env.is_one_waypoint_map:\n arcade.draw_circle_filled(\n center_x=m.x_pixels[1],\n center_y=m.y_pixels[1],\n radius=21,\n color=color.ORANGE)\n if self.static_obstacle:\n static_obst_pixels = m.static_obst_pixels\n arcade.draw_line(\n static_obst_pixels[0][0],\n static_obst_pixels[0][1],\n static_obst_pixels[1][0],\n static_obst_pixels[1][1],\n color=color.BLACK_OLIVE,\n line_width=5,\n )\n elif self.is_intersection_map:\n if agent.agent_index == 0:\n wp_clr = (10, 210, 50)\n else:\n wp_clr = (250, 140, 20)\n for i in range(len(m.waypoints)):\n arcade.draw_circle_filled(\n center_x=m.x_pixels[i],\n center_y=m.y_pixels[i],\n radius=21,\n color=wp_clr)\n else:\n # Draw the background texture\n bg_scale = 1.1\n arcade.draw_texture_rectangle(\n MAP_WIDTH_PX // 2 + SCREEN_MARGIN,\n MAP_HEIGHT_PX // 2 + SCREEN_MARGIN,\n MAP_WIDTH_PX * bg_scale,\n MAP_HEIGHT_PX * bg_scale,\n self.background)\n if a.ego_rect is not None and DRAW_COLLISION_BOXES:\n arcade.draw_rectangle_outline(\n center_x=a.x * ppm, center_y=a.y * ppm,\n width=a.vehicle_width * ppm,\n height=a.vehicle_height * ppm, color=color.LIME_GREEN,\n border_width=2, tilt_angle=math.degrees(a.angle),\n )\n arcade.draw_points(point_list=(a.ego_rect * ppm).tolist(),\n color=color.YELLOW, size=3)\n if a.front_to_waypoint is not None and DRAW_WAYPOINT_VECTORS:\n ftw = a.front_to_waypoint\n\n fy = a.front_y\n fx = a.front_x\n\n # 
arcade.draw_line(\n # start_x=e.front_x * ppm,\n # start_y=e.front_y * ppm,\n # end_x=(e.front_x + ftw[0]) * ppm,\n # end_y=(e.front_y + ftw[1]) * ppm,\n # color=c.LIME_GREEN,\n # line_width=2,\n # )\n\n arcade.draw_line(\n start_x=fx * ppm,\n start_y=fy * ppm,\n end_x=(fx + cos(\n theta - a.angle_to_waypoint) * a.distance_to_end) * ppm,\n end_y=(fy + sin(\n theta - a.angle_to_waypoint) * a.distance_to_end) * ppm,\n color=color.PURPLE,\n line_width=2,\n )\n\n # Center to front length\n ctf = a.vehicle_height / 2\n\n arcade.draw_line(\n start_x=a.x * ppm,\n start_y=a.y * ppm,\n end_x=(a.x + cos(theta) * 20) * ppm,\n end_y=(a.y + sin(theta) * 20) * ppm,\n color=color.LIGHT_RED_OCHRE,\n line_width=2,\n )\n\n arcade.draw_line(\n start_x=fx * ppm,\n start_y=fy * ppm,\n end_x=(fx + a.heading[0]) * ppm,\n end_y=(fy + a.heading[1]) * ppm,\n color=color.BLUE,\n line_width=2,\n )\n\n arcade.draw_circle_filled(\n center_x=fx * ppm,\n center_y=fy * ppm,\n radius=5,\n color=color.YELLOW)\n\n arcade.draw_circle_filled(\n center_x=a.x * ppm,\n center_y=a.y * ppm,\n radius=5,\n color=color.WHITE_SMOKE, )\n\n arcade.draw_circle_filled(\n center_x=a.static_obstacle_points[0][0] * ppm,\n center_y=a.static_obstacle_points[0][1] * ppm,\n radius=5,\n color=color.WHITE_SMOKE, )\n\n arcade.draw_circle_filled(\n center_x=a.static_obstacle_points[1][0] * ppm,\n center_y=a.static_obstacle_points[1][1] * ppm,\n radius=5,\n color=color.WHITE_SMOKE, )\n\n if a.static_obst_angle_info is not None:\n start_obst_dist, end_obst_dist, start_obst_angle, end_obst_angle = \\\n a.static_obst_angle_info\n\n # start_obst_theta = start_obst_angle\n # arcade.draw_line(\n # start_x=fx * ppm,\n # start_y=fy * ppm,\n # end_x=(fx + cos(start_obst_theta) * start_obst_dist) * ppm,\n # end_y=(fy + sin(start_obst_theta) * start_obst_dist) * ppm,\n # color=c.BLACK,\n # line_width=2,)\n\n # log.info('DRAWING LINES')\n\n arcade.draw_line(\n start_x=fx * ppm,\n start_y=fy * ppm,\n end_x=(fx + cos(\n theta - start_obst_angle) * start_obst_dist) * ppm,\n end_y=(fy + sin(\n theta - start_obst_angle) * start_obst_dist) * ppm,\n color=color.BLUE,\n line_width=2, )\n\n p_x = a.front_x + cos(theta + pi / 6) * 20\n p_y = a.front_y + sin(theta + pi / 6) * 20\n pole_test = np.array((p_x, p_y))\n pole_angle = a.get_angle_to_point(pole_test)\n\n arcade.draw_circle_filled(\n center_x=pole_test[0] * ppm,\n center_y=pole_test[1] * ppm,\n radius=5,\n color=color.WHITE_SMOKE, )\n\n arcade.draw_line(\n start_x=fx * ppm,\n start_y=fy * ppm,\n end_x=(fx + cos(\n (angle + math.pi / 2) - pole_angle) * 20) * ppm,\n end_y=(fy + sin(\n (angle + math.pi / 2) - pole_angle) * 20) * ppm,\n color=color.BRIGHT_GREEN,\n line_width=2, )\n\n # arcade.draw_line(\n # start_x=fx * ppm,\n # start_y=fy * ppm,\n # end_x=(fx + cos((angle + math.pi / 2) - end_obst_angle) * end_obst_dist) * ppm,\n # end_y=(fy + sin((angle + math.pi / 2) - end_obst_angle) * end_obst_dist) * ppm,\n # color=c.RED,\n # line_width=2,)\n\n arcade.draw_line(\n start_x=fx * ppm,\n start_y=fy * ppm,\n end_x=(a.static_obstacle_points[1][0]) * ppm,\n end_y=(a.static_obstacle_points[1][1]) * ppm,\n color=color.RED,\n line_width=2, )\n\n def draw_intersection(self):\n lines, lane_width = get_intersection()\n\n bottom_horiz, left_vert, mid_horiz, mid_vert, right_vert, top_horiz = lines\n self.draw_intersection_line(left_vert)\n self.draw_intersection_line(mid_vert)\n self.draw_intersection_line(right_vert)\n self.draw_intersection_line(top_horiz)\n self.draw_intersection_line(mid_horiz)\n 
self.draw_intersection_line(bottom_horiz)\n\n def draw_intersection_line(self, line):\n line = line * self.px_per_m\n arcade.draw_line(\n line[0][0], line[0][1], line[1][0], line[1][1],\n color=(100, 200, 240),\n line_width=2,\n )\n\n def on_key_press(self, key, modifiers):\n \"\"\"Called whenever a key is pressed. \"\"\"\n if key == arcade.key.UP or key == arcade.key.W:\n self.accel = MAX_METERS_PER_SEC_SQ\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.accel = -MAX_METERS_PER_SEC_SQ\n elif key == arcade.key.SPACE:\n self.brake = 1.0\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.steer = math.pi * PLAYER_TURN_RADIANS_PER_KEYSTROKE\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.steer = -math.pi * PLAYER_TURN_RADIANS_PER_KEYSTROKE\n\n def on_key_release(self, key, modifiers):\n \"\"\"Called when the user releases a key. \"\"\"\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.accel = 0\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.accel = 0\n elif key == arcade.key.SPACE:\n self.brake = 0\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.steer = 0\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.steer = 0\n\n def update(self, _delta_time):\n \"\"\" Movement and game logic \"\"\"\n env = self.env\n for i, agent in enumerate(env.all_agents):\n sprite = self.player_list[i]\n\n # log.trace(f'v:{a.speed}')\n # log.trace(f'a:{self.accel}')\n # log.trace(f'dt2:{_delta_time}')\n\n if self.human_controlled:\n if env.agent_index == 0:\n steer = self.steer\n accel = self.accel\n brake = self.brake\n else:\n steer = 0\n accel = random()\n brake = 0\n\n # Prev obs for next agent!\n obz, reward, done, info = env.step([steer, accel, brake])\n\n if agent.done:\n agent.reset()\n\n # log.debug(f'Deviation: '\n # f'{obz.lane_deviation / self.rough_pixels_per_meter}')\n\n\n sprite.center_x = agent.x * self.px_per_m\n sprite.center_y = agent.y * self.px_per_m\n\n # TODO: Change rotation axis to rear axle?? (now at center)\n sprite.angle = math.degrees(agent.angle)\n\n # log.trace(f'x:{a.x}')\n # log.trace(f'y:{a.y}')\n # log.trace(f'angle:{self.sprite.angle}')\n\n\ndef start(env=None, fps=60, env_config=None):\n player = Deepdrive2DPlayer(\n static_obstacle='--static-obstacle' in sys.argv,\n one_waypoint='--one-waypoint-map' in sys.argv,\n is_intersection_map='--intersection' in sys.argv,\n env=env,\n fps=fps,\n env_config=env_config\n )\n player.setup()\n if 'DISABLE_GC' in os.environ:\n import gc\n log.warning('Disabling garbage collection!')\n gc.disable()\n\n if env is None:\n # Otherwise, env is calling the renders/blits/etc...\n arcade.run()\n\n return player\n\n\nif __name__ == \"__main__\":\n start()\n" ]
[ [ "numpy.array" ] ]
GillianGrayson/qs
[ "e3d80b75dc4879bd1deb9e46b17583a452c2ccab" ]
[ "scripts/netket/mbl/cluster/submit_rhos.py" ]
[ "import pathlib\nimport os.path\nimport pandas as pd\nimport numpy as np\nimport socket\n\nhost_name = socket.gethostname()\nprint(host_name)\n\nrun_type = 'short'\n\nif host_name == \"newton\":\n path = '/data/biophys/denysov/yusipov/qs'\nelif host_name == \"master\":\n path = '/common/home/yusipov_i/data/qs'\n\nN = 8\nWs = np.linspace(0.0, 20.0, 101)\nU = 1.0\nJ = 1.0\ndiss_type = 1\ndiss_gamma = 0.1\n\nseed_start = 1\nseed_shift = 1\nseed_num = 1\nseed_chunks = 50\nseed_start_chunks = np.linspace(seed_start, seed_start + (seed_chunks-1) * seed_num, seed_chunks, dtype=int)\n\nalpha = 2.0\nbeta = 2.0\nn_samples = 10000\nn_iter = 500\n\nfor W_id, W in enumerate(Ws):\n print(f\"W={W:0.4f}\")\n\n for seed_start_chunk in seed_start_chunks:\n\n curr_path = path \\\n + '/' + f\"NDM({alpha:0.4f}_{beta:0.4f}_{n_samples:d}_{n_iter:d})\" \\\n + '/' + f\"H({W:0.4f}_{U:0.4f}_{J:0.4f})_D({diss_type:d}_{diss_gamma:0.4f})\" \\\n + '/' + f\"seeds({seed_start_chunk}_{seed_shift}_{seed_num})\"\n\n config_dict = {'experiment_id': [0]}\n config_df = pd.DataFrame(config_dict).set_index('experiment_id')\n config_df['N'] = N\n config_df['W'] = W\n config_df['U'] = U\n config_df['J'] = J\n config_df['diss_type'] = diss_type\n config_df['diss_gamma'] = diss_gamma\n config_df['seed_start'] = seed_start_chunk\n config_df['seed_shift'] = seed_shift\n config_df['seed_num'] = seed_num\n config_df['alpha'] = alpha\n config_df['beta'] = beta\n config_df['n_samples'] = n_samples\n config_df['n_iter'] = n_iter\n\n pathlib.Path(curr_path).mkdir(parents=True, exist_ok=True)\n\n fn_test = f\"{curr_path}/metrics_{seed_start_chunk}.xlsx\"\n\n if not os.path.isfile(fn_test):\n print(f\"{fn_test} does not exist!\")\n config_df.to_excel(f\"{curr_path}/config.xlsx\", index=True)\n if host_name == \"newton\":\n if run_type == 'short':\n os.system(f\"sbatch mpipks_run_short.sh \\\"{curr_path}\\\"\")\n elif run_type == 'medium':\n os.system(f\"sbatch mpipks_run_medium.sh \\\"{curr_path}\\\"\")\n elif host_name == \"master\":\n if run_type == 'short':\n os.system(f\"sbatch unn_run_short.sh \\\"{curr_path}\\\"\")\n elif run_type == 'medium':\n os.system(f\"sbatch unn_run_medium.sh \\\"{curr_path}\\\"\")\n else:\n test_df = pd.read_excel(fn_test, index_col='metrics')\n if test_df.isnull().values.any():\n print(\"Need recalc\")\n config_df.to_excel(f\"{curr_path}/config.xlsx\", index=True)\n if host_name == \"newton\":\n if run_type == 'short':\n os.system(f\"sbatch mpipks_run_short.sh \\\"{curr_path}\\\"\")\n elif run_type == 'medium':\n os.system(f\"sbatch mpipks_run_medium.sh \\\"{curr_path}\\\"\")\n elif host_name == \"master\":\n if run_type == 'short':\n os.system(f\"sbatch unn_run_short.sh \\\"{curr_path}\\\"\")\n elif run_type == 'medium':\n os.system(f\"sbatch unn_run_medium.sh \\\"{curr_path}\\\"\")\n" ]
[ [ "pandas.DataFrame", "numpy.linspace", "pandas.read_excel" ] ]
LawrenceDior/thetis
[ "fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff" ]
[ "test/bottomFriction/test_ekman_surface.py" ]
[ "\"\"\"\nBottom Ekman layer test\n=======================\n\nSteady state flow in a channel subject to bottom friction and rotation.\nVertical viscosity is assumed to be constant to allow simple analytical\nsolution.\n\"\"\"\nfrom thetis import *\nimport numpy\nimport pytest\n\n\ndef run_test(layers=25, tolerance=0.05, verify=True, **model_options):\n depth = 20.0\n\n # set mesh resolution\n dx = 2500.0\n nx = 3\n lx = nx*dx\n ny = 3\n ly = ny*dx\n mesh2d = PeriodicRectangleMesh(nx, ny, lx, ly, direction='both',\n reorder=True)\n\n dt = 90.0\n t_end = 4 * 3600.0 # sufficient to reach ~steady state\n t_export = 450.0\n u_mag = 1.0\n\n f_coriolis = 1e-4\n nu_v = 5e-4\n\n # bathymetry\n p1_2d = get_functionspace(mesh2d, 'CG', 1)\n bathymetry2d = Function(p1_2d, name='Bathymetry')\n bathymetry2d.assign(depth)\n\n wind_stress_x = 0.1027 # Pa\n wind_stress_2d = Constant((wind_stress_x, 0))\n\n # create solver\n solver_obj = solver.FlowSolver(mesh2d, bathymetry2d, layers)\n options = solver_obj.options\n options.element_family = 'dg-dg'\n options.timestepper_type = 'SSPRK22'\n options.solve_salinity = False\n options.solve_temperature = False\n options.use_implicit_vertical_diffusion = True\n options.use_bottom_friction = False\n options.use_turbulence = False\n options.coriolis_frequency = Constant(f_coriolis)\n options.vertical_viscosity = Constant(nu_v)\n options.vertical_diffusivity = Constant(nu_v)\n options.wind_stress = wind_stress_2d\n options.simulation_export_time = t_export\n options.timestepper_options.use_automatic_timestep = False\n options.timestep = dt\n options.simulation_end_time = t_end\n options.horizontal_velocity_scale = Constant(u_mag)\n options.no_exports = True\n options.update(model_options)\n\n solver_obj.create_function_spaces()\n solver_obj.create_equations()\n\n x, y, z = SpatialCoordinate(solver_obj.mesh)\n\n # analytical solution\n rho0 = physical_constants['rho0']\n d = sqrt(2*nu_v/f_coriolis)\n a = sqrt(2)/(f_coriolis*d*rho0)*wind_stress_x\n z_s = z/d\n u_expr = a*exp(z_s)*cos(z_s - pi/4)\n v_expr = a*exp(z_s)*sin(z_s - pi/4)\n\n uv_ana_expr = as_vector((u_expr, v_expr, 0))\n uv_ana = Function(solver_obj.function_spaces.P1DGv, name='solution')\n uv_ana.interpolate(uv_ana_expr)\n\n # initialize with a linear v profile to speed-up convergence\n v_init_expr = conditional(z > -d, a*(1 + z_s), 0)\n solver_obj.assign_initial_conditions(uv_3d=as_vector((v_init_expr/3, -v_init_expr, 0)))\n\n solver_obj.iterate()\n\n if verify:\n uv_p1_dg = Function(solver_obj.function_spaces.P1DGv, name='velocity p1dg')\n uv_p1_dg.project(solver_obj.fields.uv_3d + solver_obj.fields.uv_dav_3d)\n volume = lx*ly*depth\n uv_l2_err = errornorm(uv_ana_expr, uv_p1_dg)/numpy.sqrt(volume)\n assert uv_l2_err < tolerance, 'L2 error is too large: {:} > {:}'.format(uv_l2_err, tolerance)\n print_output('L2 error {:.4f} PASSED'.format(uv_l2_err))\n\n return solver_obj\n\n\[email protected](params=['dg-dg', 'rt-dg', 'bdm-dg'])\ndef element_family(request):\n return request.param\n\n\[email protected](params=['LeapFrog', 'SSPRK22'])\ndef timestepper_type(request):\n return request.param\n\n\[email protected](\"nlayers,max_err\",\n [(25, 0.035), (5, 0.065)],\n ids=['nz25', 'nz5'])\ndef test_bottom_friction(nlayers, max_err, element_family, timestepper_type):\n run_test(nlayers, tolerance=max_err, verify=True,\n element_family=element_family, timestepper_type=timestepper_type)\n" ]
[ [ "numpy.sqrt" ] ]
ericf900/pvlib-python
[ "64a6498b689d27d77bb5967e13eab9bc0018d68e" ]
[ "docs/examples/plot_greensboro_kimber_soiling.py" ]
[ "\"\"\"\nKimber Soiling Model\n====================\n\nExamples of soiling using the Kimber model [1]_.\n\nReferences\n----------\n.. [1] \"The Effect of Soiling on Large Grid-Connected Photovoltaic Systems\n in California and the Southwest Region of the United States,\" Adrianne\n Kimber, et al., IEEE 4th World Conference on Photovoltaic Energy\n Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`\n\"\"\"\n\n# %%\n# This example shows basic usage of pvlib's Kimber Soiling model with\n# :py:meth:`pvlib.losses.soiling_kimber`.\n#\n# The Kimber Soiling model assumes that soiling builds up at a constant rate\n# until cleaned either manually or by rain. The rain must reach a threshold to\n# clean the panels. When rains exceeds the threshold, it's assumed the earth is\n# damp for a grace period before it begins to soil again. There is a maximum\n# soiling build up that cannot be exceeded even if there's no rain or\n# manual cleaning.\n#\n# Threshold\n# ---------\n# The example shown here demonstrates how the threshold affects soiling.\n# Because soiling depends on rainfall, loading weather data is always the first\n# step.\n\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\nfrom pvlib.iotools import read_tmy3\nfrom pvlib.losses import soiling_kimber\nfrom pvlib.tests.conftest import DATA_DIR\n\n# get TMY3 data with rain\ngreensboro = read_tmy3(DATA_DIR / '723170TYA.CSV', coerce_year=1990)\n# NOTE: can't use Sand Point, AK b/c Lprecipdepth is -9900, ie: missing\ngreensboro_rain = greensboro[0].Lprecipdepth\n# calculate soiling with no wash dates\nTHRESHOLD = 25.0\nsoiling_no_wash = soiling_kimber(\n greensboro_rain, cleaning_threshold=THRESHOLD, istmy=True)\nsoiling_no_wash.name = 'soiling'\n# daily rain totals\ndaily_rain = greensboro_rain.iloc[:-1].resample('D').sum()\nplt.plot(\n daily_rain.index.to_pydatetime(), daily_rain.values/25.4,\n soiling_no_wash.index.to_pydatetime(), soiling_no_wash.values*100.0)\nplt.hlines(\n THRESHOLD/25.4, xmin=datetime(1990, 1, 1), xmax=datetime(1990, 12, 31),\n linestyles='--')\nplt.grid()\nplt.title(\n f'Kimber Soiling Model, dashed line shows threshold ({THRESHOLD}[mm])')\nplt.xlabel('timestamp')\nplt.ylabel('soiling build-up fraction [%] and daily rainfall [inches]')\nplt.legend(['daily rainfall [in]', 'soiling [%]'])\nplt.tight_layout()\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
Yuran-Zhao/bert-nmt
[ "e5095860e59b305bdcf1264ad117e67d3d97e705" ]
[ "fairseq/models/transformer.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom numpy.random import uniform\nfrom fairseq import options, utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqIncrementalDecoder,\n FairseqEncoderDecoderModel,\n FairseqEncoderDecoderS3Model,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n LayerNorm,\n MultiheadAttention,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n)\nfrom bert import BertTokenizer\n\nDEFAULT_MAX_SOURCE_POSITIONS = 1024\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\nfrom bert import BertModel\n\nfrom sem_extractor import BartTokenizer, SemExtractorEncoder\n\n\n@register_model('transformer')\nclass TransformerModel(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n def __init__(self,\n encoder,\n decoder,\n bertencoder,\n berttokenizer,\n mask_cls_sep=False,\n args=None):\n super().__init__(encoder, decoder, bertencoder, berttokenizer,\n mask_cls_sep, args)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout',\n type=float,\n metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout',\n type=float,\n metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout',\n '--relu-dropout',\n type=float,\n metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers',\n type=int,\n metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads',\n type=int,\n metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before',\n action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument(\n '--encoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers',\n type=int,\n metavar='N',\n help='num 
decoder layers')\n parser.add_argument('--decoder-attention-heads',\n type=int,\n metavar='N',\n help='num decoder attention heads')\n parser.add_argument(\n '--decoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before',\n action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed',\n action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings',\n action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument(\n '--no-token-positional-embeddings',\n default=False,\n action='store_true',\n help=\n 'if set, disables positional embeddings (outside self attention)')\n parser.add_argument(\n '--adaptive-softmax-cutoff',\n metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument(\n '--adaptive-softmax-dropout',\n type=float,\n metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n if len(task.datasets) > 0:\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\n else:\n src_berttokenizer = BertTokenizer.from_pretrained(\n args.bert_model_name)\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\n '--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim'\n )\n if args.decoder_embed_path and (args.decoder_embed_path !=\n args.encoder_embed_path):\n raise ValueError(\n '--share-all-embeddings not compatible with --decoder-embed-path'\n )\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = build_embedding(tgt_dict,\n args.decoder_embed_dim,\n args.decoder_embed_path)\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\n args.bert_out_dim = bertencoder.hidden_size\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n\n return TransformerModel(encoder, decoder, bertencoder,\n src_berttokenizer, args.mask_cls_sep, args)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n 
return TransformerEncoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoder(args, tgt_dict, embed_tokens)\n\n\n@register_model('transformers2')\nclass TransformerS2Model(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n def __init__(self,\n encoder,\n decoder,\n bertencoder,\n berttokenizer,\n mask_cls_sep=False,\n args=None):\n super().__init__(encoder, decoder, bertencoder, berttokenizer,\n mask_cls_sep, args)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout',\n type=float,\n metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout',\n type=float,\n metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout',\n '--relu-dropout',\n type=float,\n metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers',\n type=int,\n metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads',\n type=int,\n metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before',\n action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument(\n '--encoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers',\n type=int,\n metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads',\n type=int,\n metavar='N',\n help='num decoder attention heads')\n parser.add_argument(\n '--decoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before',\n action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed',\n action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings',\n action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument(\n '--no-token-positional-embeddings',\n default=False,\n 
action='store_true',\n help=\n 'if set, disables positional embeddings (outside self attention)')\n parser.add_argument(\n '--adaptive-softmax-cutoff',\n metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument(\n '--adaptive-softmax-dropout',\n type=float,\n metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n if len(task.datasets) > 0:\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\n else:\n src_berttokenizer = BertTokenizer.from_pretrained(\n args.bert_model_name)\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\n '--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim'\n )\n if args.decoder_embed_path and (args.decoder_embed_path !=\n args.encoder_embed_path):\n raise ValueError(\n '--share-all-embeddings not compatible with --decoder-embed-path'\n )\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = build_embedding(tgt_dict,\n args.decoder_embed_dim,\n args.decoder_embed_path)\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\n args.bert_out_dim = bertencoder.hidden_size\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n\n return TransformerS2Model(encoder, decoder, bertencoder,\n src_berttokenizer, args.mask_cls_sep, args)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerS2Encoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoder(args, tgt_dict, embed_tokens)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input,\n **kwargs):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. 
Then, feed the\n encoder output and previous decoder outputs (i.e., input feeding/teacher\n forcing) to the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())\n bert_encoder_out, _ = self.bert_encoder(bert_input,\n output_all_encoded_layers=True,\n attention_mask=1. -\n bert_encoder_padding_mask)\n bert_encoder_out = bert_encoder_out[self.bert_output_layer]\n if self.mask_cls_sep:\n bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())\n bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())\n bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()\n bert_encoder_out = {\n 'bert_encoder_out': bert_encoder_out,\n 'bert_encoder_padding_mask': bert_encoder_padding_mask,\n }\n encoder_out = self.encoder(src_tokens,\n src_lengths=src_lengths,\n bert_encoder_out=bert_encoder_out)\n decoder_out = self.decoder(prev_output_tokens,\n encoder_out=encoder_out,\n bert_encoder_out=bert_encoder_out,\n **kwargs)\n return decoder_out\n\n\n@register_model('transformers3')\nclass TransformerS3Model(FairseqEncoderDecoderS3Model):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n Transformer enhanced with Sem-Extractor\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. 
argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n def __init__(self,\n encoder,\n decoder,\n sem_extractor_encoder,\n barttokenizer,\n mask_cls_sep=False,\n args=None):\n super().__init__(encoder, decoder, sem_extractor_encoder, barttokenizer,\n mask_cls_sep, args)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout',\n type=float,\n metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout',\n type=float,\n metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout',\n '--relu-dropout',\n type=float,\n metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers',\n type=int,\n metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads',\n type=int,\n metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before',\n action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument(\n '--encoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers',\n type=int,\n metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads',\n type=int,\n metavar='N',\n help='num decoder attention heads')\n parser.add_argument(\n '--decoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before',\n action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed',\n action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings',\n action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument(\n '--no-token-positional-embeddings',\n default=False,\n action='store_true',\n help=\n 'if set, disables positional embeddings (outside self attention)')\n parser.add_argument(\n '--adaptive-softmax-cutoff',\n metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. 
'\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument(\n '--adaptive-softmax-dropout',\n type=float,\n metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n if len(task.datasets) > 0:\n # src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\n # NOTE: didn't change the dataset generated by task\n # so the variable name is still `berttokenizer` in datasets\n # but what is passed in is barttokenizer\n src_barttokenizer = next(iter(task.datasets.values())).berttokenizer\n else:\n # src_berttokenizer = BertTokenizer.from_pretrained(\n # args.bert_model_name)\n src_barttokenizer = BartTokenizer.from_pretrained(\n 'facebook/bart-base', cache_dir=args.bart_tokenizer_cache_dir)\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\n '--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim'\n )\n if args.decoder_embed_path and (args.decoder_embed_path !=\n args.encoder_embed_path):\n raise ValueError(\n '--share-all-embeddings not compatible with --decoder-embed-path'\n )\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = build_embedding(tgt_dict,\n args.decoder_embed_dim,\n args.decoder_embed_path)\n # bertencoder = BertModel.from_pretrained(args.bert_model_name)\n sem_extractor_encoder = SemExtractorEncoder.from_pretrained(\n args.sem_extractor_model, args.bart_tokenizer_cache_dir)\n args.bert_out_dim = sem_extractor_encoder.hidden_size\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n\n return TransformerS3Model(encoder, decoder, sem_extractor_encoder,\n src_barttokenizer, args.mask_cls_sep, args)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerS2Encoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoder(args, tgt_dict, embed_tokens)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, bart_input,\n **kwargs):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. 
Then, feed the\n encoder output and previous decoder outputs (i.e., input feeding/teacher\n forcing) to the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n sem_extractor_encoder_padding_mask = bart_input.eq(\n self.barttokenizer.pad())\n sem_extractor_encoder_output = self.sem_extractor_encoder.forward_token(\n bart_input,\n attention_mask=1. - sem_extractor_encoder_padding_mask,\n output_layer=self.sem_extractor_encoder_output_layer)\n # bert_encoder_out = bert_encoder_out[self.bert_output_layer]\n if self.mask_cls_sep:\n sem_extractor_encoder_padding_mask += bart_input.eq(\n self.barttokenizer.cls())\n sem_extractor_encoder_padding_mask += bart_input.eq(\n self.barttokenizer.sep())\n # TODO: find out the dimention of `bert_encoder_out` before permute operation\n bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()\n\n sem_extractor_encoder_output = {\n 'bert_encoder_output': sem_extractor_encoder_output,\n 'bert_encoder_padding_mask': sem_extractor_encoder_padding_mask,\n }\n encoder_out = self.encoder(\n src_tokens,\n src_lengths=src_lengths,\n bert_encoder_out=sem_extractor_encoder_output)\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out=encoder_out,\n bert_encoder_out=sem_extractor_encoder_output,\n **kwargs)\n return decoder_out\n\n\n@register_model('transformerstack')\nclass TransformerModelStack(FairseqEncoderDecoderModel):\n \"\"\"\n Transformer model from `\"Attention Is All You Need\" (Vaswani, et al, 2017)\n <https://arxiv.org/abs/1706.03762>`_.\n\n Args:\n encoder (TransformerEncoder): the encoder\n decoder (TransformerDecoder): the decoder\n\n The Transformer model provides the following named architectures and\n command-line arguments:\n\n .. 
argparse::\n :ref: fairseq.models.transformer_parser\n :prog:\n \"\"\"\n def __init__(self,\n encoder,\n decoder,\n bertencoder,\n berttokenizer,\n mask_cls_sep=False):\n super().__init__(encoder, decoder, bertencoder, berttokenizer,\n mask_cls_sep)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--dropout',\n type=float,\n metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout',\n type=float,\n metavar='D',\n help='dropout probability for attention weights')\n parser.add_argument('--activation-dropout',\n '--relu-dropout',\n type=float,\n metavar='D',\n help='dropout probability after activation in FFN.')\n parser.add_argument('--encoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers',\n type=int,\n metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads',\n type=int,\n metavar='N',\n help='num encoder attention heads')\n parser.add_argument('--encoder-normalize-before',\n action='store_true',\n help='apply layernorm before each encoder block')\n parser.add_argument(\n '--encoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--decoder-embed-path',\n type=str,\n metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-ffn-embed-dim',\n type=int,\n metavar='N',\n help='decoder embedding dimension for FFN')\n parser.add_argument('--decoder-layers',\n type=int,\n metavar='N',\n help='num decoder layers')\n parser.add_argument('--decoder-attention-heads',\n type=int,\n metavar='N',\n help='num decoder attention heads')\n parser.add_argument(\n '--decoder-learned-pos',\n action='store_true',\n help='use learned positional embeddings in the decoder')\n parser.add_argument('--decoder-normalize-before',\n action='store_true',\n help='apply layernorm before each decoder block')\n parser.add_argument('--share-decoder-input-output-embed',\n action='store_true',\n help='share decoder input and output embeddings')\n parser.add_argument('--share-all-embeddings',\n action='store_true',\n help='share encoder, decoder and output embeddings'\n ' (requires shared dictionary and embed dim)')\n parser.add_argument(\n '--no-token-positional-embeddings',\n default=False,\n action='store_true',\n help=\n 'if set, disables positional embeddings (outside self attention)')\n parser.add_argument(\n '--adaptive-softmax-cutoff',\n metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. 
'\n 'Must be used with adaptive_loss criterion'),\n parser.add_argument(\n '--adaptive-softmax-dropout',\n type=float,\n metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n if len(task.datasets) > 0:\n src_berttokenizer = next(iter(task.datasets.values())).berttokenizer\n else:\n src_berttokenizer = BertTokenizer.from_pretrained(\n args.bert_model_name)\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\n '--share-all-embeddings requires a joined dictionary')\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim'\n )\n if args.decoder_embed_path and (args.decoder_embed_path !=\n args.encoder_embed_path):\n raise ValueError(\n '--share-all-embeddings not compatible with --decoder-embed-path'\n )\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = build_embedding(src_dict,\n args.encoder_embed_dim,\n args.encoder_embed_path)\n decoder_embed_tokens = build_embedding(tgt_dict,\n args.decoder_embed_dim,\n args.decoder_embed_path)\n bertencoder = BertModel.from_pretrained(args.bert_model_name)\n args.bert_out_dim = bertencoder.hidden_size\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n\n return TransformerModel(encoder, decoder, bertencoder,\n src_berttokenizer, args.mask_cls_sep)\n\n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens):\n return TransformerEncoder(args, src_dict, embed_tokens)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n return TransformerDecoderStack(args, tgt_dict, embed_tokens)\n\n\nclass TransformerEncoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. 
Each layer\n is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(embed_dim)\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions,\n embed_dim,\n self.padding_idx,\n learned=args.encoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [TransformerEncoderLayer(args) for i in range(args.encoder_layers)])\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def forward(self, src_tokens, src_lengths):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n\n Returns:\n dict:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n \"\"\"\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x += self.embed_positions(src_tokens)\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n # encoder layers\n for layer in self.layers:\n x = layer(x, encoder_padding_mask)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n return {\n 'encoder_out': x, # T x B x C\n 'encoder_padding_mask': encoder_padding_mask, # B x T\n }\n\n def reorder_encoder_out(self, encoder_out, bert_outs, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out['encoder_out'] is not None:\n encoder_out['encoder_out'] = \\\n encoder_out['encoder_out'].index_select(1, new_order)\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\n if bert_outs['bert_encoder_out'] is not None:\n bert_outs['bert_encoder_out'] = \\\n bert_outs['bert_encoder_out'].index_select(1, new_order)\n if bert_outs['bert_encoder_padding_mask'] is not None:\n bert_outs['bert_encoder_padding_mask'] = \\\n bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)\n return encoder_out, bert_outs\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions,\n self.embed_positions.max_positions())\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of 
fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(\n name)] = torch.FloatTensor(1)\n for i in range(len(self.layers)):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(\n state_dict, \"{}.layers.{}\".format(name, i))\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict\n\n\nclass TransformerS2Encoder(FairseqEncoder):\n \"\"\"\n Transformer encoder consisting of *args.encoder_layers* layers. Each layer\n is a :class:`TransformerEncoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_tokens (torch.nn.Embedding): input embedding\n \"\"\"\n def __init__(self, args, dictionary, embed_tokens):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n\n embed_dim = embed_tokens.embedding_dim\n self.padding_idx = embed_tokens.padding_idx\n self.max_source_positions = args.max_source_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(embed_dim)\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions,\n embed_dim,\n self.padding_idx,\n learned=args.encoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])\n bert_gates = [x == 1 for x in bert_gates]\n assert len(bert_gates) == args.encoder_layers\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])\n for i in range(args.encoder_layers)\n ])\n\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def forward(self, src_tokens, src_lengths, bert_encoder_out):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (torch.LongTensor): lengths of each source sentence of\n shape `(batch)`\n\n Returns:\n dict:\n - **encoder_out** (Tensor): the last encoder layer's output of\n shape `(src_len, batch, embed_dim)`\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n \"\"\"\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(src_tokens)\n if self.embed_positions is not None:\n x += self.embed_positions(src_tokens)\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # compute padding mask\n encoder_padding_mask = src_tokens.eq(self.padding_idx)\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n # encoder layers\n for layer in self.layers:\n x = layer(x, encoder_padding_mask,\n bert_encoder_out['bert_encoder_out'],\n bert_encoder_out['bert_encoder_padding_mask'])\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n return {\n 'encoder_out': x, # T x B x C\n 'encoder_padding_mask': encoder_padding_mask, # B x T\n }\n\n def reorder_encoder_out(self, encoder_out, bert_outs, new_order):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output 
from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n if encoder_out['encoder_out'] is not None:\n encoder_out['encoder_out'] = \\\n encoder_out['encoder_out'].index_select(1, new_order)\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\n if bert_outs['bert_encoder_out'] is not None:\n bert_outs['bert_encoder_out'] = \\\n bert_outs['bert_encoder_out'].index_select(1, new_order)\n if bert_outs['bert_encoder_padding_mask'] is not None:\n bert_outs['bert_encoder_padding_mask'] = \\\n bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)\n return encoder_out, bert_outs\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions,\n self.embed_positions.max_positions())\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(\n name)] = torch.FloatTensor(1)\n for i in range(len(self.layers)):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(\n state_dict, \"{}.layers.{}\".format(name, i))\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict\n\n\nclass TransformerDecoder(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n self.output_embed_dim = args.decoder_output_dim\n\n padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(\n embed_dim) # todo: try with input_embed_dim\n\n self.project_in_dim = Linear(\n input_embed_dim, embed_dim,\n bias=False) if embed_dim != input_embed_dim else None\n\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions,\n embed_dim,\n padding_idx,\n learned=args.decoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])\n bert_gates = [x == 1 for x in bert_gates]\n assert len(bert_gates) == args.decoder_layers\n print('bert_gates', bert_gates)\n self.layers = nn.ModuleList([])\n decoder_no_bert = getattr(args, 'decoder_no_bert', False)\n if decoder_no_bert:\n self.layers.extend([\n TransformerStandardDecoderLayer(args,\n no_encoder_attn,\n bert_gate=bert_gates[i])\n for i in range(args.decoder_layers)\n ])\n else:\n self.layers.extend([\n TransformerDecoderLayer(args,\n no_encoder_attn,\n bert_gate=bert_gates[i])\n for i in range(args.decoder_layers)\n ])\n\n self.adaptive_softmax = None\n\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens\n if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(\n torch.Tensor(len(dictionary), self.output_embed_dim))\n nn.init.normal_(self.embed_out,\n mean=0,\n std=self.output_embed_dim**-0.5)\n\n if args.decoder_normalize_before and not getattr(\n args, 'no_decoder_final_norm', False):\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def forward(self,\n prev_output_tokens,\n encoder_out=None,\n bert_encoder_out=None,\n incremental_state=None,\n **unused):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n encoder_out (Tensor, optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(prev_output_tokens, encoder_out,\n bert_encoder_out, 
incremental_state)\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(self,\n prev_output_tokens,\n encoder_out=None,\n bert_encoder_out=None,\n incremental_state=None,\n **unused):\n \"\"\"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n # embed positions\n positions = self.embed_positions(\n prev_output_tokens,\n incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n\n inner_states = [x]\n\n # decoder layers\n for layer in self.layers:\n x, attn = layer(\n x,\n encoder_out['encoder_out'] if encoder_out is not None else None,\n encoder_out['encoder_padding_mask']\n if encoder_out is not None else None,\n bert_encoder_out['bert_encoder_out'],\n bert_encoder_out['bert_encoder_padding_mask'],\n incremental_state,\n self_attn_mask=self.buffered_future_mask(x)\n if incremental_state is None else None,\n )\n inner_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {'attn': attn, 'inner_states': inner_states}\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions,\n self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(\n self, '_future_mask'\n ) or self._future_mask is None or self._future_mask.device != tensor.device:\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._future_mask.size(0) < dim:\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(\n name)] = torch.FloatTensor(1)\n\n for i in range(len(self.layers)):\n # update layer norms\n layer_norm_map = {\n '0': 'self_attn_layer_norm',\n '1': 'encoder_attn_layer_norm',\n '2': 'final_layer_norm'\n }\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\n if k in state_dict:\n 
state_dict['{}.layers.{}.{}.{}'.format(\n name, i, new, m)] = state_dict[k]\n del state_dict[k]\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\nclass TransformerDecoderStack(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([3]))\n\n self.dropout = args.dropout\n self.share_input_output_embed = args.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = args.decoder_embed_dim\n self.output_embed_dim = args.decoder_output_dim\n\n padding_idx = embed_tokens.padding_idx\n self.max_target_positions = args.max_target_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(\n embed_dim) # todo: try with input_embed_dim\n\n self.project_in_dim = Linear(\n input_embed_dim, embed_dim,\n bias=False) if embed_dim != input_embed_dim else None\n\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions,\n embed_dim,\n padding_idx,\n learned=args.decoder_learned_pos,\n ) if not args.no_token_positional_embeddings else None\n\n self.layers = nn.ModuleList([])\n self.layers.extend([\n TransformerDecoderLayerStack(args, no_encoder_attn)\n for _ in range(args.decoder_layers)\n ])\n\n self.adaptive_softmax = None\n\n self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \\\n if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None\n\n if args.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int),\n dropout=args.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens\n if args.tie_adaptive_weights else None,\n factor=args.adaptive_softmax_factor,\n tie_proj=args.tie_adaptive_proj,\n )\n elif not self.share_input_output_embed:\n self.embed_out = nn.Parameter(\n torch.Tensor(len(dictionary), self.output_embed_dim))\n nn.init.normal_(self.embed_out,\n mean=0,\n std=self.output_embed_dim**-0.5)\n\n if args.decoder_normalize_before and not getattr(\n args, 'no_decoder_final_norm', False):\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def forward(self,\n prev_output_tokens,\n encoder_out=None,\n bert_encoder_out=None,\n incremental_state=None,\n **unused):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n encoder_out (Tensor, optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = 
self.extract_features(prev_output_tokens, encoder_out,\n bert_encoder_out, incremental_state)\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(self,\n prev_output_tokens,\n encoder_out=None,\n bert_encoder_out=None,\n incremental_state=None,\n **unused):\n \"\"\"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n # embed positions\n positions = self.embed_positions(\n prev_output_tokens,\n incremental_state=incremental_state,\n ) if self.embed_positions is not None else None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n\n inner_states = [x]\n\n # decoder layers\n for layer in self.layers:\n x, attn = layer(\n x,\n encoder_out['encoder_out'] if encoder_out is not None else None,\n encoder_out['encoder_padding_mask']\n if encoder_out is not None else None,\n bert_encoder_out['bert_encoder_out'],\n bert_encoder_out['bert_encoder_padding_mask'],\n incremental_state,\n self_attn_mask=self.buffered_future_mask(x)\n if incremental_state is None else None,\n )\n inner_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {'attn': attn, 'inner_states': inner_states}\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions,\n self.embed_positions.max_positions())\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if not hasattr(\n self, '_future_mask'\n ) or self._future_mask is None or self._future_mask.device != tensor.device:\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._future_mask.size(0) < dim:\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = '{}.embed_positions.weights'.format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict['{}.embed_positions._float_tensor'.format(\n name)] = torch.FloatTensor(1)\n\n for i in range(len(self.layers)):\n # update layer norms\n layer_norm_map = {\n '0': 'self_attn_layer_norm',\n '1': 'encoder_attn_layer_norm',\n '2': 'final_layer_norm'\n }\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = 
'{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)\n if k in state_dict:\n state_dict['{}.layers.{}.{}.{}'.format(\n name, i, new, m)] = state_dict[k]\n del state_dict[k]\n\n version_key = '{}.version'.format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\nclass TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n def __init__(self, args):\n super().__init__()\n self.embed_dim = args.encoder_embed_dim\n self.self_attn = MultiheadAttention(self.embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=True)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu'))\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)\n self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layer_norms.{}.{}'.format(name, old, m)\n if k in state_dict:\n state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n x, _ = self.self_attn(query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n return 
x\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n\nclass TransformerS2EncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n def __init__(self, args, bert_gate=True):\n super().__init__()\n self.embed_dim = args.encoder_embed_dim\n self.self_attn = MultiheadAttention(self.embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=True)\n bert_out_dim = args.bert_out_dim\n self.bert_attn = MultiheadAttention(\n self.embed_dim,\n args.encoder_attention_heads,\n kdim=bert_out_dim,\n vdim=bert_out_dim,\n dropout=args.attention_dropout,\n )\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu'))\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)\n self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n self.encoder_ratio = args.encoder_ratio\n self.bert_ratio = args.bert_ratio\n\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\n self.encoder_bert_dropout_ratio = getattr(args,\n 'encoder_bert_dropout_ratio',\n 0.25)\n assert self.encoder_bert_dropout_ratio >= 0. 
and self.encoder_bert_dropout_ratio <= 0.5\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\n\n if not bert_gate:\n self.bert_ratio = 0.\n self.encoder_bert_dropout = False\n self.encoder_bert_mixup = False\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layer_norms.{}.{}'.format(name, old, m)\n if k in state_dict:\n state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, bert_encoder_out,\n bert_encoder_padding_mask):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n x1, _ = self.self_attn(query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask)\n x2, _ = self.bert_attn(query=x,\n key=bert_encoder_out,\n value=bert_encoder_out,\n key_padding_mask=bert_encoder_padding_mask)\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\n x2 = F.dropout(x2, p=self.dropout, training=self.training)\n ratios = self.get_ratio()\n x = residual + ratios[0] * x1 + ratios[1] * x2\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n return x\n\n def get_ratio(self):\n if self.encoder_bert_dropout:\n frand = float(uniform(0, 1))\n if self.encoder_bert_mixup and self.training:\n return [frand, 1 - frand]\n if frand < self.encoder_bert_dropout_ratio and self.training:\n return [1, 0]\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\n return [0, 1]\n else:\n return [0.5, 0.5]\n else:\n return [self.encoder_ratio, self.bert_ratio]\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. 
We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n def __init__(self,\n args,\n no_encoder_attn=False,\n add_bias_kv=False,\n add_zero_attn=False,\n bert_gate=True):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.self_attn = MultiheadAttention(\n embed_dim=self.embed_dim,\n num_heads=args.decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=True)\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu'))\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.decoder_normalize_before\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, 'char_inputs', False)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = MultiheadAttention(\n self.embed_dim,\n args.decoder_attention_heads,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True)\n bert_out_dim = args.bert_out_dim\n self.bert_attn = MultiheadAttention(self.embed_dim,\n args.decoder_attention_heads,\n kdim=bert_out_dim,\n vdim=bert_out_dim,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True)\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim,\n export=export)\n\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.need_attn = True\n\n self.onnx_trace = False\n self.encoder_ratio = args.encoder_ratio\n self.bert_ratio = args.bert_ratio\n\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\n self.encoder_bert_dropout_ratio = getattr(args,\n 'encoder_bert_dropout_ratio',\n 0.25)\n assert self.encoder_bert_dropout_ratio >= 0. 
and self.encoder_bert_dropout_ratio <= 0.5\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\n\n if not bert_gate:\n self.bert_ratio = 0.\n self.encoder_bert_dropout = False\n self.encoder_bert_mixup = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def forward(\n self,\n x,\n encoder_out=None,\n encoder_padding_mask=None,\n bert_encoder_out=None,\n bert_encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n prev_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n if prev_self_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_self_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n if self.encoder_attn is not None:\n residual = x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm,\n x,\n before=True)\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.encoder_attn._set_input_buffer(incremental_state,\n saved_state)\n x1, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n x2, _ = self.bert_attn(\n query=x,\n key=bert_encoder_out,\n value=bert_encoder_out,\n key_padding_mask=bert_encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\n x2 = F.dropout(x2, p=self.dropout, training=self.training)\n ratios = self.get_ratio()\n x = residual + ratios[0] * x1 + ratios[1] * x2\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm,\n x,\n after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\n return x, attn, self_attn_state\n return x, attn\n\n def get_ratio(self):\n if self.encoder_bert_dropout:\n frand = float(uniform(0, 1))\n if self.encoder_bert_mixup and self.training:\n return [frand, 1 - frand]\n if frand < 
self.encoder_bert_dropout_ratio and self.training:\n return [1, 0]\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\n return [0, 1]\n else:\n return [0.5, 0.5]\n else:\n return [self.encoder_ratio, self.bert_ratio]\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n\nclass TransformerStandardDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n def __init__(self,\n args,\n no_encoder_attn=False,\n add_bias_kv=False,\n add_zero_attn=False,\n bert_gate=True):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.self_attn = MultiheadAttention(\n embed_dim=self.embed_dim,\n num_heads=args.decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=True)\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu'))\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.decoder_normalize_before\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, 'char_inputs', False)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = MultiheadAttention(\n self.embed_dim,\n args.decoder_attention_heads,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True)\n # bert_out_dim = args.bert_out_dim\n # self.bert_attn = MultiheadAttention(\n # self.embed_dim, args.decoder_attention_heads,\n # kdim=bert_out_dim, vdim=bert_out_dim,\n # dropout=args.attention_dropout, encoder_decoder_attention=True\n # )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim,\n export=export)\n\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.need_attn = True\n\n self.onnx_trace = False\n self.encoder_ratio = args.encoder_ratio\n self.bert_ratio = args.bert_ratio\n if not bert_gate:\n self.bert_ratio = 0.\n self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)\n self.encoder_bert_dropout_ratio = getattr(args,\n 'encoder_bert_dropout_ratio',\n 0.25)\n assert self.encoder_bert_dropout_ratio >= 0. 
and self.encoder_bert_dropout_ratio <= 0.5\n self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def forward(\n self,\n x,\n encoder_out=None,\n encoder_padding_mask=None,\n bert_encoder_out=None,\n bert_encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n prev_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n if prev_self_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_self_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n if self.encoder_attn is not None:\n residual = x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm,\n x,\n before=True)\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.encoder_attn._set_input_buffer(incremental_state,\n saved_state)\n x1, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n # x2, _ = self.bert_attn(\n # query=x,\n # key=bert_encoder_out,\n # value=bert_encoder_out,\n # key_padding_mask=bert_encoder_padding_mask,\n # incremental_state=incremental_state,\n # static_kv=True,\n # need_weights=(not self.training and self.need_attn),\n # )\n x1 = F.dropout(x1, p=self.dropout, training=self.training)\n # x2 = F.dropout(x2, p=self.dropout, training=self.training)\n # ratios = self.get_ratio()\n x = residual + x1\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm,\n x,\n after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\n return x, attn, self_attn_state\n return x, attn\n\n def get_ratio(self):\n if self.encoder_bert_dropout:\n frand = float(uniform(0, 1))\n if self.encoder_bert_mixup and self.training:\n return [frand, 1 - frand]\n if frand < self.encoder_bert_dropout_ratio and self.training:\n return [1, 0]\n elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:\n return [0, 
1]\n else:\n return [0.5, 0.5]\n else:\n return [self.encoder_ratio, self.bert_ratio]\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n\nclass TransformerDecoderLayerStack(nn.Module):\n def __init__(self,\n args,\n no_encoder_attn=False,\n add_bias_kv=False,\n add_zero_attn=False):\n super().__init__()\n self.embed_dim = args.decoder_embed_dim\n self.self_attn = MultiheadAttention(\n embed_dim=self.embed_dim,\n num_heads=args.decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n )\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu'))\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.decoder_normalize_before\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, 'char_inputs', False)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = MultiheadAttention(\n self.embed_dim,\n args.decoder_attention_heads,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True)\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim,\n export=export)\n bert_out_dim = args.bert_out_dim\n self.bert_attn = MultiheadAttention(self.embed_dim,\n args.decoder_attention_heads,\n kdim=bert_out_dim,\n vdim=bert_out_dim,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True)\n self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.bert_first = args.bert_first\n self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)\n self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n self.need_attn = True\n\n self.onnx_trace = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def forward(\n self,\n x,\n encoder_out=None,\n encoder_padding_mask=None,\n bert_encoder_out=None,\n bert_encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n prev_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n Returns:\n encoded output of shape `(batch, src_len, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n if prev_self_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_self_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n 
need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n if self.encoder_attn is not None:\n\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n self.encoder_attn._set_input_buffer(incremental_state,\n saved_state)\n\n def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding,\n incremental_state):\n residual = x\n x = self.maybe_layer_norm(layer_norm, x, before=True)\n x, attn = attnlayer(\n query=x,\n key=keyorvalue,\n value=keyorvalue,\n key_padding_mask=key_padding,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=(not self.training and self.need_attn),\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(layer_norm, x, after=True)\n return x, attn\n\n if self.bert_first:\n x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm,\n bert_encoder_out, bert_encoder_padding_mask,\n incremental_state)\n x, attn = sinattn(self.encoder_attn, x,\n self.encoder_attn_layer_norm, encoder_out,\n encoder_padding_mask, incremental_state)\n else:\n x, attn = sinattn(self.encoder_attn, x,\n self.encoder_attn_layer_norm, encoder_out,\n encoder_padding_mask, incremental_state)\n x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm,\n bert_encoder_out, bert_encoder_padding_mask,\n incremental_state)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\n return x, attn, self_attn_state\n return x, attn\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\n@register_model_architecture('transformer', 'transformer')\ndef base_architecture(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path 
= getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim',\n args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim',\n args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before',\n False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff',\n None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(\n args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(\n args, 'no_token_positional_embeddings', False)\n args.adaptive_input = getattr(args, 'adaptive_input', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim',\n args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim',\n args.decoder_embed_dim)\n\n\n@register_model_architecture('transformers2', 'transformers2')\ndef base_architecture_s2(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim',\n args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim',\n args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before',\n False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff',\n None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(\n args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(\n args, 'no_token_positional_embeddings', False)\n args.adaptive_input = getattr(args, 'adaptive_input', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim',\n args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim',\n 
args.decoder_embed_dim)\n\n\n@register_model_architecture('transformerstack', 'transformerstack')\ndef base_stack_architecture(args):\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim',\n args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim',\n args.encoder_ffn_embed_dim)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before',\n False)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.)\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.dropout = getattr(args, 'dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff',\n None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(\n args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.no_token_positional_embeddings = getattr(\n args, 'no_token_positional_embeddings', False)\n args.adaptive_input = getattr(args, 'adaptive_input', False)\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim',\n args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim',\n args.decoder_embed_dim)\n\n\n@register_model_architecture('transformer', 'transformer_iwslt_de_en')\ndef transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_architecture(args)\n\n\n@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')\ndef transformer_s2_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n 
base_architecture_s2(args)\n\n\n@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')\ndef transformerstack_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_stack_architecture(args)\n\n\n@register_model_architecture('transformers2', 'transformer_wmt_en_de')\ndef transformer_wmt_en_de(args):\n base_architecture_s2(args)\n\n\n# parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')\ndef transformer_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n False)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.dropout = getattr(args, 'dropout', 0.3)\n base_architecture(args)\n\n\n@register_model_architecture('transformers2',\n 'transformer_s2_vaswani_wmt_en_de_big')\ndef transformer_s2_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n False)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.dropout = getattr(args, 'dropout', 0.3)\n base_architecture_s2(args)\n\n\n@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')\ndef transformer_vaswani_wmt_en_fr_big(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big')\ndef transformer_wmt_en_de_big(args):\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n\n\n# default parameters used in tensor2tensor implementation\n@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')\ndef transformer_wmt_en_de_big_t2t(args):\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before',\n True)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before',\n True)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.activation_dropout = getattr(args, 'activation_dropout', 0.1)\n transformer_vaswani_wmt_en_de_big(args)\n" ]
[ [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.nn.functional.dropout", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.nn.init.normal_", "torch.nn.functional.linear", "numpy.random.uniform", "torch.Tensor", "torch.nn.Embedding" ] ]
silveto/pandas
[ "fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e" ]
[ "pandas/compat/pickle_compat.py" ]
[ "\"\"\" support pre 0.12 series pickle compatibility \"\"\"\n\nimport sys\nimport numpy as np\nimport pandas\nimport copy\nimport pickle as pkl\nfrom pandas import compat\nfrom pandas.compat import u, string_types\nfrom pandas.core.series import Series, TimeSeries\nfrom pandas.sparse.series import SparseSeries, SparseTimeSeries\n\n\ndef load_reduce(self):\n stack = self.stack\n args = stack.pop()\n func = stack[-1]\n if type(args[0]) is type:\n n = args[0].__name__\n if n == u('DeprecatedSeries') or n == u('DeprecatedTimeSeries'):\n stack[-1] = object.__new__(Series)\n return\n elif (n == u('DeprecatedSparseSeries') or\n n == u('DeprecatedSparseTimeSeries')):\n stack[-1] = object.__new__(SparseSeries)\n return\n\n try:\n value = func(*args)\n except:\n\n # try to reencode the arguments\n if getattr(self,'encoding',None) is not None:\n args = tuple([arg.encode(self.encoding)\n if isinstance(arg, string_types)\n else arg for arg in args])\n try:\n stack[-1] = func(*args)\n return\n except:\n pass\n\n if getattr(self,'is_verbose',None):\n print(sys.exc_info())\n print(func, args)\n raise\n\n stack[-1] = value\n\nif compat.PY3:\n class Unpickler(pkl._Unpickler):\n pass\nelse:\n class Unpickler(pkl.Unpickler):\n pass\n\nUnpickler.dispatch = copy.copy(Unpickler.dispatch)\nUnpickler.dispatch[pkl.REDUCE[0]] = load_reduce\n\n\ndef load(fh, encoding=None, compat=False, is_verbose=False):\n \"\"\"load a pickle, with a provided encoding\n\n if compat is True:\n fake the old class hierarchy\n if it works, then return the new type objects\n\n Parameters\n ----------\n fh: a filelike object\n encoding: an optional encoding\n compat: provide Series compatibility mode, boolean, default False\n is_verbose: show exception output\n \"\"\"\n\n try:\n if compat:\n pandas.core.series.Series = DeprecatedSeries\n pandas.core.series.TimeSeries = DeprecatedTimeSeries\n pandas.sparse.series.SparseSeries = DeprecatedSparseSeries\n pandas.sparse.series.SparseTimeSeries = DeprecatedSparseTimeSeries\n fh.seek(0)\n if encoding is not None:\n up = Unpickler(fh, encoding=encoding)\n else:\n up = Unpickler(fh)\n up.is_verbose = is_verbose\n\n return up.load()\n except:\n raise\n finally:\n if compat:\n pandas.core.series.Series = Series\n pandas.core.series.Series = TimeSeries\n pandas.sparse.series.SparseSeries = SparseSeries\n pandas.sparse.series.SparseTimeSeries = SparseTimeSeries\n\n\nclass DeprecatedSeries(np.ndarray, Series):\n pass\n\n\nclass DeprecatedTimeSeries(DeprecatedSeries):\n pass\n\n\nclass DeprecatedSparseSeries(DeprecatedSeries):\n pass\n\n\nclass DeprecatedSparseTimeSeries(DeprecatedSparseSeries):\n pass\n" ]
[ [ "pandas.compat.u" ] ]
udellgroup/GaussianCopulaImp
[ "237945f139bf0ba53e8d462b48c5683c45b2ce79" ]
[ "gcimpute/helper_evaluation.py" ]
[ "import numpy as np \nfrom scipy.stats import random_correlation, norm, expon\nfrom scipy.linalg import svdvals\nimport warnings\nwarnings.filterwarnings(\"error\")\nfrom collections import defaultdict\n\ndef get_mae(x_imp, x_true, x_obs=None):\n \"\"\"\n gets Mean Absolute Error (MAE) between x_imp and x_true\n \"\"\"\n x_imp = np.asarray(x_imp)\n x_true = np.asarray(x_true)\n if x_obs is not None:\n x_obs = np.asarray(x_obs)\n loc = np.isnan(x_obs) & (~np.isnan(x_true))\n else:\n loc = ~np.isnan(x_true)\n diff = x_imp[loc] - x_true[loc]\n return np.mean(np.abs(diff))\n\n\ndef get_rmse(x_imp, x_true, x_obs = None, relative=False):\n \"\"\"\n gets Root Mean Squared Error (RMSE) or Normalized Root Mean Squared Error (NRMSE) between x_imp and x_true\n\n Parameters\n ----------\n x_imp : array-like of shape (nsamples, nfeatures)\n Imputed complete matrix\n x_true : array-like of shape (nsamples, nfeatures)\n True matrix. Can be incomplete\n x_obs : array-like of shape (nsamples, nfeatures) or None\n Observed incomplete matrix.\n The evaluation entries are those observed in x_true but not in x_obs\n If None, evaluation entries are those observed in x_true.\n relative : bool, default=False\n Return NRMSE if True and RMSE if False\n \"\"\"\n x_imp = np.asarray(x_imp)\n x_true = np.asarray(x_true)\n if x_obs is not None:\n x_obs = np.asarray(x_obs)\n loc = np.isnan(x_obs) & (~np.isnan(x_true))\n else:\n loc = ~np.isnan(x_true)\n diff = x_imp[loc] - x_true[loc]\n #mse = np.mean(diff**2.0, axis=0)\n mse = np.mean(np.power(diff, 2))\n rmse = np.sqrt(mse)\n if not relative:\n return rmse\n else:\n # RMSE of zero-imputation\n norm = np.sqrt(np.mean(np.power(x_true[loc],2)))\n return rmse/norm\n\ndef get_smae(x_imp, x_true, x_obs, \n baseline=None, per_type=False, \n var_types = {'cont':list(range(5)), 'ord':list(range(5, 10)), 'bin':list(range(10, 15))}):\n \"\"\"\n gets Scaled Mean Absolute Error (SMAE) between x_imp and x_true\n \"\"\"\n x_imp = np.asarray(x_imp)\n x_true = np.asarray(x_true)\n x_obs = np.asarray(x_obs)\n\n p = x_obs.shape[1]\n # the first column records the imputation error of x_imp,\n # while the second column records the imputation error of baseline\n error = np.zeros((p,2))\n\n # iterate over columns/variables\n for i, col in enumerate(x_obs.T):\n test = np.bitwise_and(~np.isnan(x_true[:,i]), np.isnan(col))\n # skip the column if there is no evaluation entry\n if np.sum(test) == 0:\n error[i,0] = np.nan\n error[i,1] = np.nan\n print(f'There is no entry to be evaluated in variable {i}.')\n continue\n \n base_imp = np.median(col[~np.isnan(col)]) if baseline is None else baseline[i]\n\n x_true_col = x_true[test,i]\n x_imp_col = x_imp[test,i]\n diff = np.abs(x_imp_col - x_true_col)\n base_diff = np.abs(base_imp - x_true_col)\n error[i,0] = np.sum(diff)\n error[i,1] = np.sum(base_diff)\n\n if per_type:\n scaled_diffs = {}\n for name, val in var_types.items():\n try:\n scaled_diffs[name] = np.sum(error[val,0])/np.sum(error[val,1])\n except RuntimeWarning:\n print(f'Baseline imputation achieves zero imputation error in some variable.') \n raise\n else:\n try:\n scaled_diffs = error[:,0] / error[:,1]\n except RuntimeWarning: \n print(f'Baseline imputation achieves zero imputation error in some variable.') \n raise\n\n return scaled_diffs\n\ndef batch_iterable(X, batch_size=40):\n '''\n Generator which returns a mini-batch view of X.\n '''\n n = X.shape[0]\n start = 0\n while start < n:\n end = min(start + batch_size, n)\n yield X[start:end]\n start = end\n\ndef get_smae_batch(x_imp, 
x_true, x_obs, \n batch_size = 40,\n baseline=None, per_type=False, \n var_types = {'cont':list(range(5)), 'ord':list(range(5, 10)), 'bin':list(range(10, 15))}):\n '''\n Compute SMAE in the unit of a mini-batch\n '''\n x_imp = np.asarray(x_imp)\n x_true = np.asarray(x_true)\n x_obs = np.asarray(x_obs)\n\n result = defaultdict(list) if per_type else []\n baseline = np.nanmedian(x_obs,0) if baseline is None else baseline\n for imp, true, obs in zip(batch_iterable(x_imp,batch_size), batch_iterable(x_true,batch_size), batch_iterable(x_obs,batch_size)):\n scaled_diffs = get_smae(imp, true, obs, baseline=baseline, per_type=per_type)\n if per_type:\n for name, val in scaled_diffs.items():\n result[name].append(val)\n else:\n result.append(scaled_diffs)\n\n return result\n\ndef get_scaled_error(sigma_imp, sigma):\n \"\"\"\n gets a scaled error between matrices |simga - sigma_imp|_F^2 / |sigma|_F^2\n \"\"\"\n return np.linalg.norm(sigma - sigma_imp) / np.linalg.norm(sigma)\n\ndef grassman_dist(A,B):\n U1, d1, _ = np.linalg.svd(A, full_matrices = False)\n U2, d2, _ = np.linalg.svd(B, full_matrices = False)\n d = svdvals(np.dot(U1.T, U2))\n theta = np.arccos(d)\n return np.linalg.norm(theta), np.linalg.norm(d1-d2)\n\ndef error_by_reliability(error, r, xtrue, ximp, num=100, start=1):\n q = np.linspace(0, 1-start/num, num=num)\n r_q = np.nanquantile(r, q)\n err = np.zeros(num)\n loc_missing = ~np.isnan(r)\n r, xtrue, ximp = r[loc_missing], xtrue[loc_missing], ximp[loc_missing]\n for i,x in enumerate(r_q):\n # keep entries with top reliabilities \n loc_q = r >= x\n\n val, imp = xtrue[loc_q], ximp[loc_q]\n if error == 'NRMSE':\n err[i] = np.sqrt(np.power(val-imp, 2).mean()) / np.sqrt(np.power(val, 2).mean()) \n elif error == 'RMSE':\n err[i] = np.sqrt(np.power(val-imp, 2).mean()) \n elif error == 'MAE':\n err[i] = np.abs(val-imp).mean()\n else: \n raise ValueError('Error can only be one of NRMSE, RMSE, MAE.')\n return err\n\n\n\n\n" ]
[ [ "numpy.arccos", "numpy.linalg.norm", "numpy.asarray", "numpy.zeros", "numpy.dot", "numpy.nanquantile", "numpy.sum", "numpy.isnan", "numpy.power", "numpy.linalg.svd", "numpy.sqrt", "numpy.abs", "numpy.linspace", "numpy.nanmedian" ] ]
JimmyZhang12/predict-T
[ "8ae818b0791104de20633ce91e6d633cda7445b3" ]
[ "python/jimmy_plot/deprecated/front_end_preds/supply_voltage_over_cycles.py" ]
[ "#ASSUMES DATA WITH THROTTLING, NO DECOR STALL\n\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass Cycle_Dump:\n def __init__(self):\n self.ve_count = 0\n self.action_count = 0\n return\n\n def reset(self):\n self.ve_flag = False\n self.ve_flag_prev = []\n self.action_flag = False\n self.cycle = None\n self.supply_curr = None\n self.supply_volt = None\n self.pred_state = None\n self.numCycles_var = None\n return\n\n def num_voltage_emergency(self, line):\n linespl = line.split()\n ve_read = int(linespl[1])\n if ve_read > self.ve_count:\n self.ve_count = ve_read\n self.ve_flag = True\n return\n def total_action(self, line):\n linespl = line.split()\n action_read = int(linespl[1])\n if action_read > self.action_count:\n self.action_count = action_read\n self.action_flag = True\n return\n def counter(self, line):\n linespl = line.split()\n self.cycle = int(linespl[1])\n return\n def state(self, line):\n linespl = line.split()\n self.pred_state = int(linespl[1])\n return\n def supply_current(self, line):\n linespl = line.split()\n self.supply_curr = float(linespl[1])\n return\n def supply_voltage(self, line):\n linespl = line.split()\n self.supply_volt = float(linespl[1])\n return\n def numCycles(self,line):\n linespl = line.split()\n self.numCycles_var = int(linespl[1])\n return\n #PARAMETERS\nHOME = os.environ['HOME']\nPREDICTOR = 'HarvardPowerPredictor_1'\nCLASS = 'DESKTOP'\nTEST = 'different_cycle'\npath = HOME + '/output_11_18/gem5_out/' + CLASS + '_' + PREDICTOR + '/' + TEST + '.txt'\nprint(path)\n#PARAMETERS\nstats = open(path, 'r')\n\nfig = plt.figure(figsize=(10,5))\nax = plt.axes()\nfig.suptitle('Supply Voltage Over Time' + '(' + PREDICTOR + ', ' + CLASS + ', ' + TEST + ' )', fontsize=14)\nax.set_xlabel('Cycle', fontsize=14) \nax.set_ylabel('Supply Voltage', fontsize=14)\nax2 = ax.twinx()\nax2.set_ylabel('Current', color='tab:blue') # we already handled the x-label with ax1\nvoltage = [0]\ncurrent =[0]\n\n#read line by lin\nline = stats.readline()\nline = stats.readline()\n\ncycle_dump = Cycle_Dump()\nwhile line:\n cycle_dump.reset()\n while(True): \n #one cycle worth of stat dumps \n if 'Begin Simulation Statistics' in line or not line:\n break\n stat_name = line.split()[0].split('.')[-1].split(':')[0]\n func = getattr(cycle_dump, stat_name, False)\n if func:\n func(line)\n line = stats.readline() \n \n for _ in range(cycle_dump.numCycles_var):\n voltage.append(None)\n current.append(None)\n #implicit assumption stat dumps are every 2 cycles\n voltage[-1] = cycle_dump.supply_volt\n current[-1] = cycle_dump.supply_curr\n voltage[-2] = (voltage[-1]+voltage[-3])/2\n current[-2] = (current[-1]+current[-3])/2\n if cycle_dump.ve_flag:\n ax.axvspan(len(voltage), len(voltage)+1, color='blue', alpha=0.15)\n #if cycle_dump.action_flag:\n # ax.axvspan(len(voltage), len(voltage)+1, color='red', alpha=0.3)\n line = stats.readline()\n if cycle_dump.cycle > 10000:\n break\n\nxvar = np.linspace(0,len(voltage),len(voltage))\nstart_cycle = 8000\nend_cycle = 8500\nax.plot(xvar, voltage,color='black', linewidth=1.0)\nax.set_ylim(bottom = min(i for i in voltage if i > 0.8), top = max(voltage))\n\nax2.plot(xvar, current, color='tab:blue')\nax2.tick_params(axis='y', labelcolor='tab:blue')\nax2.set_ylim([min(i for i in current if i > 0.8), max(current)])\n\nplt.xlim(left = start_cycle, right = end_cycle)\nplt.savefig(HOME +'/passat/plot/11-18_Supply_Volt+Curr_Over_Time' + '_' + PREDICTOR + '_' + CLASS + '_' + TEST +'.png', dpi=300)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "matplotlib.pyplot.axes" ] ]
AlvinIsonomia/darts.pytorch1.3_MultiGPU
[ "826843454cd1ace9cbb0410981856e51000b407e" ]
[ "rnn/architect.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\ndef _clip(grads, max_norm):\n total_norm = 0\n for g in grads:\n param_norm = g.data.norm(2)\n total_norm += param_norm ** 2\n total_norm = total_norm ** 0.5\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for g in grads:\n g.data.mul_(clip_coef)\n return clip_coef\n\n\nclass Architect(object):\n\n def __init__(self, model, args):\n self.network_weight_decay = args.wdecay\n self.network_clip = args.clip\n self.model = model\n self.optimizer = torch.optim.Adam(self.model.arch_parameters(),\n lr=args.arch_lr, weight_decay=args.arch_wdecay)\n self.optimizer = nn.DataParallel(self.optimizer, device_ids=[0,1])\n\n def _compute_unrolled_model(self, hidden, input, target, eta):\n loss, hidden_next = self.model._loss(hidden, input, target)\n theta = _concat(self.model.parameters()).data\n grads = torch.autograd.grad(loss, self.model.parameters())\n clip_coef = _clip(grads, self.network_clip)\n dtheta = _concat(grads).data + self.network_weight_decay*theta\n unrolled_model = self._construct_model_from_theta(theta.sub(eta, dtheta))\n return unrolled_model, clip_coef\n\n def step(self,\n hidden_train, input_train, target_train,\n hidden_valid, input_valid, target_valid,\n network_optimizer, unrolled):\n eta = network_optimizer.param_groups[0]['lr']\n self.optimizer.zero_grad()\n if unrolled:\n hidden = self._backward_step_unrolled(hidden_train, input_train, target_train, hidden_valid, input_valid, target_valid, eta)\n else:\n hidden = self._backward_step(hidden_valid, input_valid, target_valid)\n self.optimizer.module.step()\n return hidden, None\n\n def _backward_step(self, hidden, input, target):\n loss, hidden_next = self.model._loss(hidden, input, target)\n loss.backward()\n return hidden_next\n\n def _backward_step_unrolled(self,\n hidden_train, input_train, target_train,\n hidden_valid, input_valid, target_valid, eta):\n unrolled_model, clip_coef = self._compute_unrolled_model(hidden_train, input_train, target_train, eta)\n unrolled_loss, hidden_next = unrolled_model._loss(hidden_valid, input_valid, target_valid)\n\n unrolled_loss.backward()\n dalpha = [v.grad for v in unrolled_model.arch_parameters()]\n dtheta = [v.grad for v in unrolled_model.parameters()]\n _clip(dtheta, self.network_clip)\n vector = [dt.data for dt in dtheta]\n implicit_grads = self._hessian_vector_product(vector, hidden_train, input_train, target_train, r=1e-2)\n\n for g, ig in zip(dalpha, implicit_grads):\n g.data.sub_(eta * clip_coef, ig.data)\n\n for v, g in zip(self.model.arch_parameters(), dalpha):\n if v.grad is None:\n v.grad = g.detach()\n else:\n v.grad.data.copy_(g.data)\n return hidden_next\n\n def _construct_model_from_theta(self, theta):\n model_new = self.model.new()\n model_dict = self.model.state_dict()\n\n params, offset = {}, 0\n for k, v in self.model.named_parameters():\n v_length = np.prod(v.size())\n params[k] = theta[offset: offset+v_length].view(v.size())\n offset += v_length\n\n assert offset == len(theta)\n model_dict.update(params)\n model_new.load_state_dict(model_dict)\n return model_new.cuda()\n\n def _hessian_vector_product(self, vector, hidden, input, target, r=1e-2):\n R = r / _concat(vector).norm()\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R, v)\n loss, _ = self.model._loss(hidden, input, target)\n grads_p = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n 
p.data.sub_(2*R, v)\n loss, _ = self.model._loss(hidden, input, target)\n grads_n = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R, v)\n\n return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]\n\n" ]
[ [ "torch.nn.DataParallel" ] ]
lmjohns3/cube-experiment
[ "ab6d1a9df95efebc369d184ab1c748d73d5c3313" ]
[ "analysis/03-fill-dropouts-svt.py" ]
[ "#!/usr/bin/env python\n\nimport climate\nimport lmj.cubes\nimport lmj.cubes.fill\nimport numpy as np\nimport pandas as pd\n\nlogging = climate.get_logger('fill')\n\ndef svt(dfs, threshold, window):\n '''Complete missing marker data using singular value thresholding.\n\n This method alters the given `dfs` in-place.\n\n Parameters\n ----------\n dfs : list of pd.DataFrame\n Frames of source data. The frames will be stacked into a single large\n frame to use during SVT. This stacked frame will then be split and\n returned.\n threshold : float\n Threshold for singular values. If none, use a value computed from the\n spectrum of singular values.\n window : int\n Model windows of this many consecutive frames.\n '''\n df = lmj.cubes.fill.stack(dfs, window)\n centers = lmj.cubes.fill.center(df)\n pos, vis, data_norm = lmj.cubes.fill.window(df, window)\n\n # if the threshold is none, set it using the falloff point in the spectrum.\n if threshold is None:\n s = pd.Series(np.linalg.svd(pos, compute_uv=False))\n v = pd.rolling_mean(11, s.diff(), center=True)\n a = pd.rolling_mean(11, v.diff().shift(-1), center=True)\n threshold = s[a.argmax()]\n logging.info('using threshold %.2f', threshold)\n\n i = 0\n dist = 1\n x = y = np.zeros_like(pos)\n while dist >= lmj.cubes.fill.PHASESPACE_TOLERANCE:\n i += 1\n err = vis * (pos - x)\n y += err\n u, s, v = np.linalg.svd(y, full_matrices=False)\n s = np.clip(s - threshold, 0, np.inf)\n x = np.dot(u * s, v)\n dist = abs(err[vis]).mean()\n logging.info('%d: error %f (%d); mean %f; %s',\n i, (err * err).sum() / data_norm, len(s.nonzero()[0]), dist,\n np.percentile(abs(err[vis]), [50, 90, 95, 99]).round(4))\n\n lmj.cubes.fill.update(df, x, window)\n lmj.cubes.fill.restore(df, centers)\n lmj.cubes.fill.unstack(df, dfs)\n\n\ndef main(args):\n lmj.cubes.fill.main(svt, args, args.svt_threshold, args.window)\n\n\nif __name__ == '__main__':\n climate.call(main)\n" ]
[ [ "numpy.zeros_like", "numpy.dot", "numpy.linalg.svd", "numpy.clip" ] ]
dorcoh/scikit-learn
[ "374bc511540ef995b2a693ffc5334a71f723619f" ]
[ "sklearn/utils/estimator_checks.py" ]
[ "import types\nimport warnings\nimport pickle\nimport re\nfrom copy import deepcopy\nfrom functools import partial, wraps\nfrom inspect import signature\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.stats import rankdata\nimport joblib\n\nfrom . import IS_PYPY\nfrom .. import config_context\nfrom ._testing import _get_args\nfrom ._testing import assert_raise_message\nfrom ._testing import assert_array_equal\nfrom ._testing import assert_array_almost_equal\nfrom ._testing import assert_allclose\nfrom ._testing import assert_allclose_dense_sparse\nfrom ._testing import set_random_state\nfrom ._testing import SkipTest\nfrom ._testing import ignore_warnings\nfrom ._testing import create_memmap_backed_data\nfrom ._testing import raises\nfrom . import is_scalar_nan\n\nfrom ..linear_model import LogisticRegression\nfrom ..linear_model import Ridge\n\nfrom ..base import (\n clone,\n ClusterMixin,\n is_classifier,\n is_regressor,\n is_outlier_detector,\n RegressorMixin,\n _is_pairwise,\n)\n\nfrom ..metrics import accuracy_score, adjusted_rand_score, f1_score\nfrom ..random_projection import BaseRandomProjection\nfrom ..feature_selection import SelectKBest\nfrom ..pipeline import make_pipeline\nfrom ..exceptions import DataConversionWarning\nfrom ..exceptions import NotFittedError\nfrom ..exceptions import SkipTestWarning\nfrom ..model_selection import train_test_split\nfrom ..model_selection import ShuffleSplit\nfrom ..model_selection._validation import _safe_split\nfrom ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances)\n\nfrom .import shuffle\nfrom ._tags import (\n _DEFAULT_TAGS,\n _safe_tags,\n)\nfrom .validation import has_fit_parameter, _num_samples\nfrom ..preprocessing import StandardScaler\nfrom ..preprocessing import scale\nfrom ..datasets import (\n load_iris,\n make_blobs,\n make_multilabel_classification,\n make_regression\n)\n\nREGRESSION_DATASET = None\nCROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']\n\n\ndef _yield_checks(estimator):\n name = estimator.__class__.__name__\n tags = _safe_tags(estimator)\n pairwise = _is_pairwise(estimator)\n\n yield check_no_attributes_set_in_init\n yield check_estimators_dtypes\n yield check_fit_score_takes_y\n yield check_sample_weights_pandas_series\n yield check_sample_weights_not_an_array\n yield check_sample_weights_list\n yield check_sample_weights_shape\n if has_fit_parameter(estimator, \"sample_weight\") and not pairwise:\n # We skip pairwise because the data is not pairwise\n yield partial(check_sample_weights_invariance, kind='ones')\n yield partial(check_sample_weights_invariance, kind='zeros')\n yield check_estimators_fit_returns_self\n yield partial(check_estimators_fit_returns_self, readonly_memmap=True)\n\n # Check that all estimator yield informative messages when\n # trained on empty datasets\n if not tags[\"no_validation\"]:\n yield check_complex_data\n yield check_dtype_object\n yield check_estimators_empty_data_messages\n\n if name not in CROSS_DECOMPOSITION:\n # cross-decomposition's \"transform\" returns X and Y\n yield check_pipeline_consistency\n\n if not tags[\"allow_nan\"] and not tags[\"no_validation\"]:\n # Test that all estimators check their input for NaN's and infs\n yield check_estimators_nan_inf\n\n if pairwise:\n # Check that pairwise estimator throws error on non-square input\n yield check_nonsquare_error\n\n yield check_estimators_overwrite_params\n if hasattr(estimator, 'sparsify'):\n yield check_sparsify_coefficients\n\n yield 
check_estimator_sparse_data\n\n # Test that estimators can be pickled, and once pickled\n # give the same answer as before.\n yield check_estimators_pickle\n\n yield check_estimator_get_tags_default_keys\n\ndef _yield_classifier_checks(classifier):\n tags = _safe_tags(classifier)\n\n # test classifiers can handle non-array data and pandas objects\n yield check_classifier_data_not_an_array\n # test classifiers trained on a single label always return this label\n yield check_classifiers_one_label\n yield check_classifiers_classes\n yield check_estimators_partial_fit_n_features\n if tags[\"multioutput\"]:\n yield check_classifier_multioutput\n # basic consistency testing\n yield check_classifiers_train\n yield partial(check_classifiers_train, readonly_memmap=True)\n yield partial(check_classifiers_train, readonly_memmap=True,\n X_dtype='float32')\n yield check_classifiers_regression_target\n if tags[\"multilabel\"]:\n yield check_classifiers_multilabel_representation_invariance\n if not tags[\"no_validation\"]:\n yield check_supervised_y_no_nan\n if not tags['multioutput_only']:\n yield check_supervised_y_2d\n if tags[\"requires_fit\"]:\n yield check_estimators_unfitted\n if 'class_weight' in classifier.get_params().keys():\n yield check_class_weight_classifiers\n\n yield check_non_transformer_estimators_n_iter\n # test if predict_proba is a monotonic transformation of decision_function\n yield check_decision_proba_consistency\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_supervised_y_no_nan(name, estimator_orig):\n # Checks that the Estimator targets are not NaN.\n estimator = clone(estimator_orig)\n rng = np.random.RandomState(888)\n X = rng.randn(10, 5)\n y = np.full(10, np.inf)\n y = _enforce_estimator_tags_y(estimator, y)\n\n match = (\n \"Input contains NaN, infinity or a value too large for \"\n r\"dtype\\('float64'\\).\"\n )\n err_msg = (\n f\"Estimator {name} should have raised error on fitting \"\n \"array y with NaN value.\"\n )\n with raises(ValueError, match=match, err_msg=err_msg):\n estimator.fit(X, y)\n\n\ndef _yield_regressor_checks(regressor):\n tags = _safe_tags(regressor)\n # TODO: test with intercept\n # TODO: test with multiple responses\n # basic testing\n yield check_regressors_train\n yield partial(check_regressors_train, readonly_memmap=True)\n yield partial(check_regressors_train, readonly_memmap=True,\n X_dtype='float32')\n yield check_regressor_data_not_an_array\n yield check_estimators_partial_fit_n_features\n if tags[\"multioutput\"]:\n yield check_regressor_multioutput\n yield check_regressors_no_decision_function\n if not tags[\"no_validation\"] and not tags['multioutput_only']:\n yield check_supervised_y_2d\n yield check_supervised_y_no_nan\n name = regressor.__class__.__name__\n if name != 'CCA':\n # check that the regressor handles int input\n yield check_regressors_int\n if tags[\"requires_fit\"]:\n yield check_estimators_unfitted\n yield check_non_transformer_estimators_n_iter\n\n\ndef _yield_transformer_checks(transformer):\n tags = _safe_tags(transformer)\n # All transformers should either deal with sparse data or raise an\n # exception with type TypeError and an intelligible error message\n if not tags[\"no_validation\"]:\n yield check_transformer_data_not_an_array\n # these don't actually fit the data, so don't raise errors\n yield check_transformer_general\n if tags[\"preserves_dtype\"]:\n yield check_transformer_preserve_dtypes\n yield partial(check_transformer_general, readonly_memmap=True)\n if not _safe_tags(transformer, 
key=\"stateless\"):\n yield check_transformers_unfitted\n # Dependent on external solvers and hence accessing the iter\n # param is non-trivial.\n external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',\n 'RandomizedLasso', 'LogisticRegressionCV']\n\n name = transformer.__class__.__name__\n if name not in external_solver:\n yield check_transformer_n_iter\n\n\ndef _yield_clustering_checks(clusterer):\n yield check_clusterer_compute_labels_predict\n name = clusterer.__class__.__name__\n if name not in ('WardAgglomeration', \"FeatureAgglomeration\"):\n # this is clustering on the features\n # let's not test that here.\n yield check_clustering\n yield partial(check_clustering, readonly_memmap=True)\n yield check_estimators_partial_fit_n_features\n yield check_non_transformer_estimators_n_iter\n\n\ndef _yield_outliers_checks(estimator):\n\n # checks for outlier detectors that have a fit_predict method\n if hasattr(estimator, 'fit_predict'):\n yield check_outliers_fit_predict\n\n # checks for estimators that can be used on a test set\n if hasattr(estimator, 'predict'):\n yield check_outliers_train\n yield partial(check_outliers_train, readonly_memmap=True)\n # test outlier detectors can handle non-array data\n yield check_classifier_data_not_an_array\n # test if NotFittedError is raised\n if _safe_tags(estimator, key=\"requires_fit\"):\n yield check_estimators_unfitted\n\n\ndef _yield_all_checks(estimator):\n name = estimator.__class__.__name__\n tags = _safe_tags(estimator)\n if \"2darray\" not in tags[\"X_types\"]:\n warnings.warn(\"Can't test estimator {} which requires input \"\n \" of type {}\".format(name, tags[\"X_types\"]),\n SkipTestWarning)\n return\n if tags[\"_skip_test\"]:\n warnings.warn(\"Explicit SKIP via _skip_test tag for estimator \"\n \"{}.\".format(name),\n SkipTestWarning)\n return\n\n for check in _yield_checks(estimator):\n yield check\n if is_classifier(estimator):\n for check in _yield_classifier_checks(estimator):\n yield check\n if is_regressor(estimator):\n for check in _yield_regressor_checks(estimator):\n yield check\n if hasattr(estimator, 'transform'):\n for check in _yield_transformer_checks(estimator):\n yield check\n if isinstance(estimator, ClusterMixin):\n for check in _yield_clustering_checks(estimator):\n yield check\n if is_outlier_detector(estimator):\n for check in _yield_outliers_checks(estimator):\n yield check\n yield check_parameters_default_constructible\n yield check_methods_sample_order_invariance\n yield check_methods_subset_invariance\n yield check_fit2d_1sample\n yield check_fit2d_1feature\n yield check_get_params_invariance\n yield check_set_params\n yield check_dict_unchanged\n yield check_dont_overwrite_parameters\n yield check_fit_idempotent\n if not tags[\"no_validation\"]:\n yield check_n_features_in\n yield check_fit1d\n yield check_fit2d_predict1d\n if tags[\"requires_y\"]:\n yield check_requires_y_none\n if tags[\"requires_positive_X\"]:\n yield check_fit_non_negative\n\n\ndef _get_check_estimator_ids(obj):\n \"\"\"Create pytest ids for checks.\n\n When `obj` is an estimator, this returns the pprint version of the\n estimator (with `print_changed_only=True`). 
When `obj` is a function, the\n name of the function is returned with its keyword arguments.\n\n `_get_check_estimator_ids` is designed to be used as the `id` in\n `pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`\n is yielding estimators and checks.\n\n Parameters\n ----------\n obj : estimator or function\n Items generated by `check_estimator`.\n\n Returns\n -------\n id : str or None\n\n See Also\n --------\n check_estimator\n \"\"\"\n if callable(obj):\n if not isinstance(obj, partial):\n return obj.__name__\n\n if not obj.keywords:\n return obj.func.__name__\n\n kwstring = \",\".join([\"{}={}\".format(k, v)\n for k, v in obj.keywords.items()])\n return \"{}({})\".format(obj.func.__name__, kwstring)\n if hasattr(obj, \"get_params\"):\n with config_context(print_changed_only=True):\n return re.sub(r\"\\s\", \"\", str(obj))\n\n\ndef _construct_instance(Estimator):\n \"\"\"Construct Estimator instance if possible.\"\"\"\n required_parameters = getattr(Estimator, \"_required_parameters\", [])\n if len(required_parameters):\n if required_parameters in ([\"estimator\"], [\"base_estimator\"]):\n if issubclass(Estimator, RegressorMixin):\n estimator = Estimator(Ridge())\n else:\n estimator = Estimator(LogisticRegression(C=1))\n elif required_parameters in (['estimators'],):\n # Heterogeneous ensemble classes (i.e. stacking, voting)\n if issubclass(Estimator, RegressorMixin):\n estimator = Estimator(estimators=[\n (\"est1\", Ridge(alpha=0.1)),\n (\"est2\", Ridge(alpha=1))\n ])\n else:\n estimator = Estimator(estimators=[\n (\"est1\", LogisticRegression(C=0.1)),\n (\"est2\", LogisticRegression(C=1))\n ])\n else:\n msg = (f\"Can't instantiate estimator {Estimator.__name__} \"\n f\"parameters {required_parameters}\")\n # raise additional warning to be shown by pytest\n warnings.warn(msg, SkipTestWarning)\n raise SkipTest(msg)\n else:\n estimator = Estimator()\n return estimator\n\n\ndef _maybe_mark_xfail(estimator, check, pytest):\n # Mark (estimator, check) pairs as XFAIL if needed (see conditions in\n # _should_be_skipped_or_marked())\n # This is similar to _maybe_skip(), but this one is used by\n # @parametrize_with_checks() instead of check_estimator()\n\n should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)\n if not should_be_marked:\n return estimator, check\n else:\n return pytest.param(estimator, check,\n marks=pytest.mark.xfail(reason=reason))\n\n\ndef _maybe_skip(estimator, check):\n # Wrap a check so that it's skipped if needed (see conditions in\n # _should_be_skipped_or_marked())\n # This is similar to _maybe_mark_xfail(), but this one is used by\n # check_estimator() instead of @parametrize_with_checks which requires\n # pytest\n should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)\n if not should_be_skipped:\n return check\n\n check_name = (check.func.__name__ if isinstance(check, partial)\n else check.__name__)\n\n @wraps(check)\n def wrapped(*args, **kwargs):\n raise SkipTest(\n f\"Skipping {check_name} for {estimator.__class__.__name__}: \"\n f\"{reason}\"\n )\n\n return wrapped\n\n\ndef _should_be_skipped_or_marked(estimator, check):\n # Return whether a check should be skipped (when using check_estimator())\n # or marked as XFAIL (when using @parametrize_with_checks()), along with a\n # reason.\n # Currently, a check should be skipped or marked if\n # the check is in the _xfail_checks tag of the estimator\n\n check_name = (check.func.__name__ if isinstance(check, partial)\n else check.__name__)\n\n xfail_checks 
= _safe_tags(estimator, key='_xfail_checks') or {}\n if check_name in xfail_checks:\n return True, xfail_checks[check_name]\n\n return False, 'placeholder reason that will never be used'\n\n\ndef parametrize_with_checks(estimators):\n \"\"\"Pytest specific decorator for parametrizing estimator checks.\n\n The `id` of each check is set to be a pprint version of the estimator\n and the name of the check with its keyword arguments.\n This allows to use `pytest -k` to specify which tests to run::\n\n pytest test_check_estimators.py -k check_estimators_fit_returns_self\n\n Parameters\n ----------\n estimators : list of estimators instances\n Estimators to generated checks for.\n\n .. versionchanged:: 0.24\n Passing a class was deprecated in version 0.23, and support for\n classes was removed in 0.24. Pass an instance instead.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n decorator : `pytest.mark.parametrize`\n\n Examples\n --------\n >>> from sklearn.utils.estimator_checks import parametrize_with_checks\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.tree import DecisionTreeRegressor\n\n >>> @parametrize_with_checks([LogisticRegression(),\n ... DecisionTreeRegressor()])\n ... def test_sklearn_compatible_estimator(estimator, check):\n ... check(estimator)\n\n \"\"\"\n import pytest\n\n if any(isinstance(est, type) for est in estimators):\n msg = (\"Passing a class was deprecated in version 0.23 \"\n \"and isn't supported anymore from 0.24.\"\n \"Please pass an instance instead.\")\n raise TypeError(msg)\n\n def checks_generator():\n for estimator in estimators:\n name = type(estimator).__name__\n for check in _yield_all_checks(estimator):\n check = partial(check, name)\n yield _maybe_mark_xfail(estimator, check, pytest)\n\n return pytest.mark.parametrize(\"estimator, check\", checks_generator(),\n ids=_get_check_estimator_ids)\n\n\ndef check_estimator(Estimator, generate_only=False):\n \"\"\"Check if estimator adheres to scikit-learn conventions.\n\n This estimator will run an extensive test-suite for input validation,\n shapes, etc, making sure that the estimator complies with `scikit-learn`\n conventions as detailed in :ref:`rolling_your_own_estimator`.\n Additional tests for classifiers, regressors, clustering or transformers\n will be run if the Estimator class inherits from the corresponding mixin\n from sklearn.base.\n\n Setting `generate_only=True` returns a generator that yields (estimator,\n check) tuples where the check can be called independently from each\n other, i.e. `check(estimator)`. This allows all checks to be run\n independently and report the checks that are failing.\n\n scikit-learn provides a pytest specific decorator,\n :func:`~sklearn.utils.parametrize_with_checks`, making it easier to test\n multiple estimators.\n\n Parameters\n ----------\n Estimator : estimator object\n Estimator instance to check.\n\n .. versionchanged:: 0.24\n Passing a class was deprecated in version 0.23, and support for\n classes was removed in 0.24.\n\n generate_only : bool, default=False\n When `False`, checks are evaluated when `check_estimator` is called.\n When `True`, `check_estimator` returns a generator that yields\n (estimator, check) tuples. The check is run by calling\n `check(estimator)`.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n checks_generator : generator\n Generator that yields (estimator, check) tuples. 
Returned when\n `generate_only=True`.\n \"\"\"\n if isinstance(Estimator, type):\n msg = (\"Passing a class was deprecated in version 0.23 \"\n \"and isn't supported anymore from 0.24.\"\n \"Please pass an instance instead.\")\n raise TypeError(msg)\n\n estimator = Estimator\n name = type(estimator).__name__\n\n def checks_generator():\n for check in _yield_all_checks(estimator):\n check = _maybe_skip(estimator, check)\n yield estimator, partial(check, name)\n\n if generate_only:\n return checks_generator()\n\n for estimator, check in checks_generator():\n try:\n check(estimator)\n except SkipTest as exception:\n # SkipTest is thrown when pandas can't be imported, or by checks\n # that are in the xfail_checks tag\n warnings.warn(str(exception), SkipTestWarning)\n\n\ndef _regression_dataset():\n global REGRESSION_DATASET\n if REGRESSION_DATASET is None:\n X, y = make_regression(\n n_samples=200, n_features=10, n_informative=1,\n bias=5.0, noise=20, random_state=42,\n )\n X = StandardScaler().fit_transform(X)\n REGRESSION_DATASET = X, y\n return REGRESSION_DATASET\n\n\ndef _set_checking_parameters(estimator):\n # set parameters to speed up some estimators and\n # avoid deprecated behaviour\n params = estimator.get_params()\n name = estimator.__class__.__name__\n if (\"n_iter\" in params and name != \"TSNE\"):\n estimator.set_params(n_iter=5)\n if \"max_iter\" in params:\n if estimator.max_iter is not None:\n estimator.set_params(max_iter=min(5, estimator.max_iter))\n # LinearSVR, LinearSVC\n if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:\n estimator.set_params(max_iter=20)\n # NMF\n if estimator.__class__.__name__ == 'NMF':\n # FIXME : init should be removed in 1.1\n estimator.set_params(max_iter=500, init='nndsvda')\n # MLP\n if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:\n estimator.set_params(max_iter=100)\n if \"n_resampling\" in params:\n # randomized lasso\n estimator.set_params(n_resampling=5)\n if \"n_estimators\" in params:\n estimator.set_params(n_estimators=min(5, estimator.n_estimators))\n if \"max_trials\" in params:\n # RANSAC\n estimator.set_params(max_trials=10)\n if \"n_init\" in params:\n # K-Means\n estimator.set_params(n_init=2)\n\n if name == 'TruncatedSVD':\n # TruncatedSVD doesn't run with n_components = n_features\n # This is ugly :-/\n estimator.n_components = 1\n\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = min(estimator.n_clusters, 2)\n\n if hasattr(estimator, \"n_best\"):\n estimator.n_best = 1\n\n if name == \"SelectFdr\":\n # be tolerant of noisy datasets (not actually speed)\n estimator.set_params(alpha=.5)\n\n if name == \"TheilSenRegressor\":\n estimator.max_subpopulation = 100\n\n if isinstance(estimator, BaseRandomProjection):\n # Due to the jl lemma and often very few samples, the number\n # of components of the random matrix projection will be probably\n # greater than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n estimator.set_params(n_components=2)\n\n if isinstance(estimator, SelectKBest):\n # SelectKBest has a default of k=10\n # which is more feature than we have in most case.\n estimator.set_params(k=1)\n\n if name in ('HistGradientBoostingClassifier',\n 'HistGradientBoostingRegressor'):\n # The default min_samples_leaf (20) isn't appropriate for small\n # datasets (only very shallow trees are built) that the checks use.\n estimator.set_params(min_samples_leaf=5)\n\n if name == 'DummyClassifier':\n # the default strategy prior would output constant 
predictions and fail\n # for check_classifiers_predictions\n estimator.set_params(strategy='stratified')\n\n # Speed-up by reducing the number of CV or splits for CV estimators\n loo_cv = ['RidgeCV']\n if name not in loo_cv and hasattr(estimator, 'cv'):\n estimator.set_params(cv=3)\n if hasattr(estimator, 'n_splits'):\n estimator.set_params(n_splits=3)\n\n if name == 'OneHotEncoder':\n estimator.set_params(handle_unknown='ignore')\n\n if name in CROSS_DECOMPOSITION:\n estimator.set_params(n_components=1)\n\n\nclass _NotAnArray:\n \"\"\"An object that is convertible to an array.\n\n Parameters\n ----------\n data : array-like\n The data.\n \"\"\"\n\n def __init__(self, data):\n self.data = np.asarray(data)\n\n def __array__(self, dtype=None):\n return self.data\n\n def __array_function__(self, func, types, args, kwargs):\n if func.__name__ == \"may_share_memory\":\n return True\n raise TypeError(\"Don't want to call array_function {}!\".format(\n func.__name__))\n\n\ndef _is_pairwise_metric(estimator):\n \"\"\"Returns True if estimator accepts pairwise metric.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if _pairwise is set to True and False otherwise.\n \"\"\"\n metric = getattr(estimator, \"metric\", None)\n\n return bool(metric == 'precomputed')\n\n\ndef _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):\n\n if _is_pairwise_metric(estimator):\n return pairwise_distances(X, metric='euclidean')\n if _is_pairwise(estimator):\n return kernel(X, X)\n\n return X\n\n\ndef _generate_sparse_matrix(X_csr):\n \"\"\"Generate sparse matrices with {32,64}bit indices of diverse format.\n\n Parameters\n ----------\n X_csr: CSR Matrix\n Input matrix in CSR format.\n\n Returns\n -------\n out: iter(Matrices)\n In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo',\n 'coo_64', 'csc_64', 'csr_64']\n \"\"\"\n\n assert X_csr.format == 'csr'\n yield 'csr', X_csr.copy()\n for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:\n yield sparse_format, X_csr.asformat(sparse_format)\n\n # Generate large indices matrix only if its supported by scipy\n X_coo = X_csr.asformat('coo')\n X_coo.row = X_coo.row.astype('int64')\n X_coo.col = X_coo.col.astype('int64')\n yield \"coo_64\", X_coo\n\n for sparse_format in ['csc', 'csr']:\n X = X_csr.asformat(sparse_format)\n X.indices = X.indices.astype('int64')\n X.indptr = X.indptr.astype('int64')\n yield sparse_format + \"_64\", X\n\n\ndef check_estimator_sparse_data(name, estimator_orig):\n rng = np.random.RandomState(0)\n X = rng.rand(40, 10)\n X[X < .8] = 0\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n X_csr = sparse.csr_matrix(X)\n y = (4 * rng.rand(40)).astype(int)\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n tags = _safe_tags(estimator_orig)\n for matrix_format, X in _generate_sparse_matrix(X_csr):\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n if name in ['Scaler', 'StandardScaler']:\n estimator.set_params(with_mean=False)\n # fit and predict\n if \"64\" in matrix_format:\n err_msg = (\n f\"Estimator {name} doesn't seem to support {matrix_format} \"\n \"matrix, and is not failing gracefully, e.g. 
by using \"\n \"check_array(X, accept_large_sparse=False)\"\n )\n else:\n err_msg = (\n f\"Estimator {name} doesn't seem to fail gracefully on sparse \"\n \"data: error message should state explicitly that sparse \"\n \"input is not supported if this is not the case.\"\n )\n with raises(\n (TypeError, ValueError),\n match=[\"sparse\", \"Sparse\"],\n may_pass=True,\n err_msg=err_msg,\n ):\n with ignore_warnings(category=FutureWarning):\n estimator.fit(X, y)\n if hasattr(estimator, \"predict\"):\n pred = estimator.predict(X)\n if tags['multioutput_only']:\n assert pred.shape == (X.shape[0], 1)\n else:\n assert pred.shape == (X.shape[0],)\n if hasattr(estimator, 'predict_proba'):\n probs = estimator.predict_proba(X)\n if tags['binary_only']:\n expected_probs_shape = (X.shape[0], 2)\n else:\n expected_probs_shape = (X.shape[0], 4)\n assert probs.shape == expected_probs_shape\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_pandas_series(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type pandas.Series in the 'fit' function.\n estimator = clone(estimator_orig)\n if has_fit_parameter(estimator, \"sample_weight\"):\n try:\n import pandas as pd\n X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],\n [2, 1], [2, 2], [2, 3], [2, 4],\n [3, 1], [3, 2], [3, 3], [3, 4]])\n X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig))\n y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])\n weights = pd.Series([1] * 12)\n if _safe_tags(estimator, key=\"multioutput_only\"):\n y = pd.DataFrame(y)\n try:\n estimator.fit(X, y, sample_weight=weights)\n except ValueError:\n raise ValueError(\"Estimator {0} raises error if \"\n \"'sample_weight' parameter is of \"\n \"type pandas.Series\".format(name))\n except ImportError:\n raise SkipTest(\"pandas is not installed: not testing for \"\n \"input of type pandas.Series to class weight.\")\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_sample_weights_not_an_array(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type _NotAnArray in the 'fit' function.\n estimator = clone(estimator_orig)\n if has_fit_parameter(estimator, \"sample_weight\"):\n X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],\n [2, 1], [2, 2], [2, 3], [2, 4],\n [3, 1], [3, 2], [3, 3], [3, 4]])\n X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))\n y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])\n weights = _NotAnArray([1] * 12)\n if _safe_tags(estimator, key=\"multioutput_only\"):\n y = _NotAnArray(y.data.reshape(-1, 1))\n estimator.fit(X, y, sample_weight=weights)\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_sample_weights_list(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type list in the 'fit' function.\n if has_fit_parameter(estimator_orig, \"sample_weight\"):\n estimator = clone(estimator_orig)\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)),\n estimator_orig)\n y = np.arange(n_samples) % 3\n y = _enforce_estimator_tags_y(estimator, y)\n sample_weight = [3] * n_samples\n # Test that estimators don't raise any exception\n estimator.fit(X, y, sample_weight=sample_weight)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_shape(name, estimator_orig):\n # check that estimators raise an error if sample_weight\n # shape mismatches the input\n if (has_fit_parameter(estimator_orig, \"sample_weight\") and\n 
not _is_pairwise(estimator_orig)):\n estimator = clone(estimator_orig)\n X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],\n [2, 1], [2, 1], [2, 1], [2, 1],\n [3, 3], [3, 3], [3, 3], [3, 3],\n [4, 1], [4, 1], [4, 1], [4, 1]])\n y = np.array([1, 1, 1, 1, 2, 2, 2, 2,\n 1, 1, 1, 1, 2, 2, 2, 2])\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y, sample_weight=np.ones(len(y)))\n\n with raises(ValueError):\n estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))\n\n with raises(ValueError):\n estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_invariance(name, estimator_orig, kind=\"ones\"):\n # For kind=\"ones\" check that the estimators yield same results for\n # unit weights and no weights\n # For kind=\"zeros\" check that setting sample_weight to 0 is equivalent\n # to removing corresponding samples.\n estimator1 = clone(estimator_orig)\n estimator2 = clone(estimator_orig)\n set_random_state(estimator1, random_state=0)\n set_random_state(estimator2, random_state=0)\n\n X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3],\n [2, 1], [2, 1], [2, 1], [2, 1],\n [3, 3], [3, 3], [3, 3], [3, 3],\n [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)\n y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2,\n 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)\n\n if kind == 'ones':\n X2 = X1\n y2 = y1\n sw2 = np.ones(shape=len(y1))\n err_msg = (f\"For {name} sample_weight=None is not equivalent to \"\n f\"sample_weight=ones\")\n elif kind == 'zeros':\n # Construct a dataset that is very different to (X, y) if weights\n # are disregarded, but identical to (X, y) given weights.\n X2 = np.vstack([X1, X1 + 1])\n y2 = np.hstack([y1, 3 - y1])\n sw2 = np.ones(shape=len(y1) * 2)\n sw2[len(y1):] = 0\n X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)\n\n err_msg = (f\"For {name}, a zero sample_weight is not equivalent \"\n f\"to removing the sample\")\n else: # pragma: no cover\n raise ValueError\n\n y1 = _enforce_estimator_tags_y(estimator1, y1)\n y2 = _enforce_estimator_tags_y(estimator2, y2)\n\n estimator1.fit(X1, y=y1, sample_weight=None)\n estimator2.fit(X2, y=y2, sample_weight=sw2)\n\n for method in [\"predict\", \"predict_proba\",\n \"decision_function\", \"transform\"]:\n if hasattr(estimator_orig, method):\n X_pred1 = getattr(estimator1, method)(X1)\n X_pred2 = getattr(estimator2, method)(X1)\n assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)\n\n\n@ignore_warnings(category=(FutureWarning, UserWarning))\ndef check_dtype_object(name, estimator_orig):\n # check that estimators treat dtype object as numeric if possible\n rng = np.random.RandomState(0)\n X = _pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)\n X = X.astype(object)\n tags = _safe_tags(estimator_orig)\n y = (X[:, 0] * 4).astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y)\n if hasattr(estimator, \"predict\"):\n estimator.predict(X)\n\n if hasattr(estimator, \"transform\"):\n estimator.transform(X)\n\n with raises(Exception, match=\"Unknown label type\", may_pass=True):\n estimator.fit(X, y.astype(object))\n\n if 'string' not in tags['X_types']:\n X[0, 0] = {'foo': 'bar'}\n msg = \"argument must be a string.* number\"\n with raises(TypeError, match=msg):\n estimator.fit(X, y)\n else:\n # Estimators supporting string will not call np.asarray to convert the\n # data to numeric and therefore, the error will not be raised.\n # Checking for each element dtype in the input array will be costly.\n # Refer 
to #11401 for full discussion.\n estimator.fit(X, y)\n\n\ndef check_complex_data(name, estimator_orig):\n # check that estimators raise an exception on providing complex data\n X = np.random.sample(10) + 1j * np.random.sample(10)\n X = X.reshape(-1, 1)\n y = np.random.sample(10) + 1j * np.random.sample(10)\n estimator = clone(estimator_orig)\n with raises(ValueError, match=\"Complex data not supported\"):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_dict_unchanged(name, estimator_orig):\n # this estimator raises\n # ValueError: Found array with 0 feature(s) (shape=(23, 0))\n # while a minimum of 1 is required.\n # error\n if name in ['SpectralCoclustering']:\n return\n rnd = np.random.RandomState(0)\n if name in ['RANSACRegressor']:\n X = 3 * rnd.uniform(size=(20, 3))\n else:\n X = 2 * rnd.uniform(size=(20, 3))\n\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n if hasattr(estimator, \"n_best\"):\n estimator.n_best = 1\n\n set_random_state(estimator, 1)\n\n estimator.fit(X, y)\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]:\n if hasattr(estimator, method):\n dict_before = estimator.__dict__.copy()\n getattr(estimator, method)(X)\n assert estimator.__dict__ == dict_before, (\n 'Estimator changes __dict__ during %s' % method)\n\n\ndef _is_public_parameter(attr):\n return not (attr.startswith('_') or attr.endswith('_'))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_dont_overwrite_parameters(name, estimator_orig):\n # check that fit method only changes or sets private attributes\n if hasattr(estimator_orig.__init__, \"deprecated_original\"):\n # to not check deprecated classes\n return\n estimator = clone(estimator_orig)\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n dict_before_fit = estimator.__dict__.copy()\n estimator.fit(X, y)\n\n dict_after_fit = estimator.__dict__\n\n public_keys_after_fit = [key for key in dict_after_fit.keys()\n if _is_public_parameter(key)]\n\n attrs_added_by_fit = [key for key in public_keys_after_fit\n if key not in dict_before_fit.keys()]\n\n # check that fit doesn't add any public attribute\n assert not attrs_added_by_fit, (\n 'Estimator adds public attribute(s) during' ' the fit method.'\n ' Estimators are only allowed to add private attributes'\n ' either started with _ or ended'\n ' with _ but %s added'\n % ', '.join(attrs_added_by_fit))\n\n # check that fit doesn't change any public attribute\n attrs_changed_by_fit = [key for key in public_keys_after_fit\n if (dict_before_fit[key]\n is not dict_after_fit[key])]\n\n assert not attrs_changed_by_fit, (\n 'Estimator changes public attribute(s) during'\n ' the fit method. 
Estimators are only allowed'\n ' to change attributes started'\n ' or ended with _, but'\n ' %s changed'\n % ', '.join(attrs_changed_by_fit))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_fit2d_predict1d(name, estimator_orig):\n # check by fitting a 2d array and predicting with a 1d array\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]:\n if hasattr(estimator, method):\n assert_raise_message(ValueError, \"Reshape your data\",\n getattr(estimator, method), X[0])\n\n\ndef _apply_on_subsets(func, X):\n # apply function on the whole set and on mini batches\n result_full = func(X)\n n_features = X.shape[1]\n result_by_batch = [func(batch.reshape(1, n_features))\n for batch in X]\n\n # func can output tuple (e.g. score_samples)\n if type(result_full) == tuple:\n result_full = result_full[0]\n result_by_batch = list(map(lambda x: x[0], result_by_batch))\n\n if sparse.issparse(result_full):\n result_full = result_full.A\n result_by_batch = [x.A for x in result_by_batch]\n\n return np.ravel(result_full), np.ravel(result_by_batch)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_methods_subset_invariance(name, estimator_orig):\n # check that method gives invariant results if applied\n # on mini batches or the whole set\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"score_samples\", \"predict_proba\"]:\n\n msg = (\"{method} of {name} is not invariant when applied \"\n \"to a subset.\").format(method=method, name=name)\n\n if hasattr(estimator, method):\n result_full, result_by_batch = _apply_on_subsets(\n getattr(estimator, method), X)\n assert_allclose(result_full, result_by_batch,\n atol=1e-7, err_msg=msg)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_methods_sample_order_invariance(name, estimator_orig):\n # check that method gives invariant results if applied\n # on a subset with different sample order\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(np.int64)\n if _safe_tags(estimator_orig, key='binary_only'):\n y[y == 2] = 1\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 2\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n idx = np.random.permutation(X.shape[0])\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"score_samples\", \"predict_proba\"]:\n msg = (\"{method} of {name} is not invariant when applied to a dataset\"\n \"with different sample 
order.\").format(method=method, name=name)\n\n if hasattr(estimator, method):\n assert_allclose_dense_sparse(getattr(estimator, method)(X)[idx],\n getattr(estimator, method)(X[idx]),\n atol=1e-9,\n err_msg=msg)\n\n\n@ignore_warnings\ndef check_fit2d_1sample(name, estimator_orig):\n # Check that fitting a 2d array with only one sample either works or\n # returns an informative message. The error message should either mention\n # the number of samples or the number of classes.\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(1, 10))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n\n # min_cluster_size cannot be less than the data size for OPTICS.\n if name == 'OPTICS':\n estimator.set_params(min_samples=1)\n\n msgs = [\"1 sample\", \"n_samples = 1\", \"n_samples=1\", \"one sample\",\n \"1 class\", \"one class\"]\n\n with raises(ValueError, match=msgs, may_pass=True):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_fit2d_1feature(name, estimator_orig):\n # check fitting a 2d array with only 1 feature either works or returns\n # informative message\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(10, 1))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n # ensure two labels in subsample for RandomizedLogisticRegression\n if name == 'RandomizedLogisticRegression':\n estimator.sample_fraction = 1\n # ensure non skipped trials for RANSACRegressor\n if name == 'RANSACRegressor':\n estimator.residual_threshold = 0.5\n\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator, 1)\n\n msgs = [r\"1 feature\\(s\\)\", \"n_features = 1\", \"n_features=1\"]\n\n with raises(ValueError, match=msgs, may_pass=True):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_fit1d(name, estimator_orig):\n # check fitting 1d X array raises a ValueError\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20))\n y = X.astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n with raises(ValueError):\n estimator.fit(X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_general(name, transformer, readonly_memmap=False):\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X = StandardScaler().fit_transform(X)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, transformer)\n\n if readonly_memmap:\n X, y = create_memmap_backed_data([X, y])\n\n _check_transformer(name, transformer, X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_data_not_an_array(name, transformer):\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X = StandardScaler().fit_transform(X)\n # We need to make sure that we have non negative data, for things\n # like NMF\n X 
-= X.min() - .1\n X = _pairwise_estimator_convert_X(X, transformer)\n this_X = _NotAnArray(X)\n this_y = _NotAnArray(np.asarray(y))\n _check_transformer(name, transformer, this_X, this_y)\n # try the same with some list\n _check_transformer(name, transformer, X.tolist(), y.tolist())\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformers_unfitted(name, transformer):\n X, y = _regression_dataset()\n\n transformer = clone(transformer)\n with raises(\n (AttributeError, ValueError),\n err_msg=\"The unfitted \"\n f\"transformer {name} does not raise an error when \"\n \"transform is called. Perhaps use \"\n \"check_is_fitted in transform.\",\n ):\n transformer.transform(X)\n\n\ndef _check_transformer(name, transformer_orig, X, y):\n n_samples, n_features = np.asarray(X).shape\n transformer = clone(transformer_orig)\n set_random_state(transformer)\n\n # fit\n\n if name in CROSS_DECOMPOSITION:\n y_ = np.c_[np.asarray(y), np.asarray(y)]\n y_[::2, 1] *= 2\n if isinstance(X, _NotAnArray):\n y_ = _NotAnArray(y_)\n else:\n y_ = y\n\n transformer.fit(X, y_)\n # fit_transform method should work on non fitted estimator\n transformer_clone = clone(transformer)\n X_pred = transformer_clone.fit_transform(X, y=y_)\n\n if isinstance(X_pred, tuple):\n for x_pred in X_pred:\n assert x_pred.shape[0] == n_samples\n else:\n # check for consistent n_samples\n assert X_pred.shape[0] == n_samples\n\n if hasattr(transformer, 'transform'):\n if name in CROSS_DECOMPOSITION:\n X_pred2 = transformer.transform(X, y_)\n X_pred3 = transformer.fit_transform(X, y=y_)\n else:\n X_pred2 = transformer.transform(X)\n X_pred3 = transformer.fit_transform(X, y=y_)\n\n if _safe_tags(transformer_orig, key='non_deterministic'):\n msg = name + ' is non deterministic'\n raise SkipTest(msg)\n if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):\n for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):\n assert_allclose_dense_sparse(\n x_pred, x_pred2, atol=1e-2,\n err_msg=\"fit_transform and transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n assert_allclose_dense_sparse(\n x_pred, x_pred3, atol=1e-2,\n err_msg=\"consecutive fit_transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n else:\n assert_allclose_dense_sparse(\n X_pred, X_pred2,\n err_msg=\"fit_transform and transform outcomes \"\n \"not consistent in %s\"\n % transformer, atol=1e-2)\n assert_allclose_dense_sparse(\n X_pred, X_pred3, atol=1e-2,\n err_msg=\"consecutive fit_transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n assert _num_samples(X_pred2) == n_samples\n assert _num_samples(X_pred3) == n_samples\n\n # raises error on malformed input for transform\n if hasattr(X, 'shape') and \\\n not _safe_tags(transformer, key=\"stateless\") and \\\n X.ndim == 2 and X.shape[1] > 1:\n\n # If it's not an array, it does not have a 'T' property\n with raises(\n ValueError,\n err_msg=f\"The transformer {name} does not raise an error \"\n \"when the number of features in transform is different from \"\n \"the number of features in fit.\"\n ):\n transformer.transform(X[:, :-1])\n\n\n@ignore_warnings\ndef check_pipeline_consistency(name, estimator_orig):\n if _safe_tags(estimator_orig, key='non_deterministic'):\n msg = name + ' is non deterministic'\n raise SkipTest(msg)\n\n # check that make_pipeline(est) gives same score as est\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, 
kernel=rbf_kernel)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator)\n pipeline = make_pipeline(estimator)\n estimator.fit(X, y)\n pipeline.fit(X, y)\n\n funcs = [\"score\", \"fit_transform\"]\n\n for func_name in funcs:\n func = getattr(estimator, func_name, None)\n if func is not None:\n func_pipeline = getattr(pipeline, func_name)\n result = func(X, y)\n result_pipe = func_pipeline(X, y)\n assert_allclose_dense_sparse(result, result_pipe)\n\n\n@ignore_warnings\ndef check_fit_score_takes_y(name, estimator_orig):\n # check that all estimators accept an optional y\n # in fit and score so they can be used in pipelines\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = rnd.uniform(size=(n_samples, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = np.arange(n_samples) % 3\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator)\n\n funcs = [\"fit\", \"score\", \"partial_fit\", \"fit_predict\", \"fit_transform\"]\n for func_name in funcs:\n func = getattr(estimator, func_name, None)\n if func is not None:\n func(X, y)\n args = [p.name for p in signature(func).parameters.values()]\n if args[0] == \"self\":\n # if_delegate_has_method makes methods into functions\n # with an explicit \"self\", so need to shift arguments\n args = args[1:]\n assert args[1] in [\"y\", \"Y\"], (\n \"Expected y or Y as second argument for method \"\n \"%s of %s. Got arguments: %r.\"\n % (func_name, type(estimator).__name__, args))\n\n\n@ignore_warnings\ndef check_estimators_dtypes(name, estimator_orig):\n rnd = np.random.RandomState(0)\n X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)\n X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig)\n X_train_64 = X_train_32.astype(np.float64)\n X_train_int_64 = X_train_32.astype(np.int64)\n X_train_int_32 = X_train_32.astype(np.int32)\n y = X_train_int_64[:, 0]\n y = _enforce_estimator_tags_y(estimator_orig, y)\n\n methods = [\"predict\", \"transform\", \"decision_function\", \"predict_proba\"]\n\n for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:\n estimator = clone(estimator_orig)\n set_random_state(estimator, 1)\n estimator.fit(X_train, y)\n\n for method in methods:\n if hasattr(estimator, method):\n getattr(estimator, method)(X_train)\n\n\ndef check_transformer_preserve_dtypes(name, transformer_orig):\n # check that dtype are preserved meaning if input X is of some dtype\n # X_transformed should be from the same dtype.\n X, y = make_blobs(\n n_samples=30,\n centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0,\n cluster_std=0.1,\n )\n X = StandardScaler().fit_transform(X)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, transformer_orig)\n\n for dtype in _safe_tags(transformer_orig, key=\"preserves_dtype\"):\n X_cast = X.astype(dtype)\n transformer = clone(transformer_orig)\n set_random_state(transformer)\n X_trans = transformer.fit_transform(X_cast, y)\n\n if isinstance(X_trans, tuple):\n # cross-decompostion returns a tuple of (x_scores, y_scores)\n # when given y with fit_transform; only check the first element\n X_trans = X_trans[0]\n\n # check that the output dtype is preserved\n assert X_trans.dtype == dtype, (\n f'Estimator transform dtype: {X_trans.dtype} - '\n f'original/expected dtype: {dtype.__name__}'\n )\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_empty_data_messages(name, estimator_orig):\n e = clone(estimator_orig)\n set_random_state(e, 1)\n\n 
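    # Descriptive note on the code that follows: the two fit calls below exercise
    # the empty-input paths — an array with zero samples and an array with zero
    # features must each make the estimator raise a ValueError.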
X_zero_samples = np.empty(0).reshape(0, 3)\n # The precise message can change depending on whether X or y is\n # validated first. Let us test the type of exception only:\n err_msg = (\n f\"The estimator {name} does not raise an error when an \"\n \"empty data is used to train. Perhaps use check_array in train.\"\n )\n with raises(ValueError, err_msg=err_msg):\n e.fit(X_zero_samples, [])\n\n X_zero_features = np.empty(0).reshape(12, 0)\n # the following y should be accepted by both classifiers and regressors\n # and ignored by unsupervised models\n y = _enforce_estimator_tags_y(\n e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])\n )\n msg = (\n r\"0 feature\\(s\\) \\(shape=\\(\\d*, 0\\)\\) while a minimum of \\d* \"\n \"is required.\"\n )\n with raises(ValueError, match=msg):\n e.fit(X_zero_features, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_nan_inf(name, estimator_orig):\n # Checks that Estimator X's do not contain NaN or inf.\n rnd = np.random.RandomState(0)\n X_train_finite = _pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),\n estimator_orig)\n X_train_nan = rnd.uniform(size=(10, 3))\n X_train_nan[0, 0] = np.nan\n X_train_inf = rnd.uniform(size=(10, 3))\n X_train_inf[0, 0] = np.inf\n y = np.ones(10)\n y[:5] = 0\n y = _enforce_estimator_tags_y(estimator_orig, y)\n error_string_fit = \"Estimator doesn't check for NaN and inf in fit.\"\n error_string_predict = (\"Estimator doesn't check for NaN and inf in\"\n \" predict.\")\n error_string_transform = (\"Estimator doesn't check for NaN and inf in\"\n \" transform.\")\n for X_train in [X_train_nan, X_train_inf]:\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n set_random_state(estimator, 1)\n # try to fit\n with raises(\n ValueError, match=[\"inf\", \"NaN\"], err_msg=error_string_fit\n ):\n estimator.fit(X_train, y)\n # actually fit\n estimator.fit(X_train_finite, y)\n\n # predict\n if hasattr(estimator, \"predict\"):\n with raises(\n ValueError,\n match=[\"inf\", \"NaN\"],\n err_msg=error_string_predict,\n ):\n estimator.predict(X_train)\n\n # transform\n if hasattr(estimator, \"transform\"):\n with raises(\n ValueError,\n match=[\"inf\", \"NaN\"],\n err_msg=error_string_transform,\n ):\n estimator.transform(X_train)\n\n\n@ignore_warnings\ndef check_nonsquare_error(name, estimator_orig):\n \"\"\"Test that error is thrown when non-square data provided.\"\"\"\n\n X, y = make_blobs(n_samples=20, n_features=10)\n estimator = clone(estimator_orig)\n\n with raises(\n ValueError,\n err_msg=f\"The pairwise estimator {name} does not raise an error \"\n \"on non-square data\",\n ):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_estimators_pickle(name, estimator_orig):\n \"\"\"Test that we can pickle all estimators.\"\"\"\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n\n # some estimators can't do features less than 0\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)\n\n tags = _safe_tags(estimator_orig)\n # include NaN values when the estimator should deal with them\n if tags['allow_nan']:\n # set randomly 10 elements to np.nan\n rng = np.random.RandomState(42)\n mask = rng.choice(X.size, 10, replace=False)\n X.reshape(-1)[mask] = np.nan\n\n estimator = clone(estimator_orig)\n\n y = _enforce_estimator_tags_y(estimator, y)\n\n 
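    # Descriptive note on the code that follows: the estimator is fitted, pickled,
    # and un-pickled, and the outputs of predict/transform/decision_function/
    # predict_proba are required to be identical before and after pickling.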
set_random_state(estimator)\n estimator.fit(X, y)\n\n # pickle and unpickle!\n pickled_estimator = pickle.dumps(estimator)\n module_name = estimator.__module__\n if module_name.startswith('sklearn.') and not (\n \"test_\" in module_name or module_name.endswith(\"_testing\")\n ):\n # strict check for sklearn estimators that are not implemented in test\n # modules.\n assert b\"version\" in pickled_estimator\n unpickled_estimator = pickle.loads(pickled_estimator)\n\n result = dict()\n for method in check_methods:\n if hasattr(estimator, method):\n result[method] = getattr(estimator, method)(X)\n\n for method in result:\n unpickled_result = getattr(unpickled_estimator, method)(X)\n assert_allclose_dense_sparse(result[method], unpickled_result)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_partial_fit_n_features(name, estimator_orig):\n # check if number of features changes between calls to partial_fit.\n if not hasattr(estimator_orig, 'partial_fit'):\n return\n estimator = clone(estimator_orig)\n X, y = make_blobs(n_samples=50, random_state=1)\n X -= X.min()\n y = _enforce_estimator_tags_y(estimator_orig, y)\n\n try:\n if is_classifier(estimator):\n classes = np.unique(y)\n estimator.partial_fit(X, y, classes=classes)\n else:\n estimator.partial_fit(X, y)\n except NotImplementedError:\n return\n\n with raises(\n ValueError,\n err_msg=f\"The estimator {name} does not raise an error when the \"\n \"number of features changes between calls to partial_fit.\",\n ):\n estimator.partial_fit(X[:, :-1], y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifier_multioutput(name, estimator):\n n_samples, n_labels, n_classes = 42, 5, 3\n tags = _safe_tags(estimator)\n estimator = clone(estimator)\n X, y = make_multilabel_classification(random_state=42,\n n_samples=n_samples,\n n_labels=n_labels,\n n_classes=n_classes)\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n assert y_pred.shape == (n_samples, n_classes), (\n \"The shape of the prediction for multioutput data is \"\n \"incorrect. Expected {}, got {}.\"\n .format((n_samples, n_labels), y_pred.shape))\n assert y_pred.dtype.kind == 'i'\n\n if hasattr(estimator, \"decision_function\"):\n decision = estimator.decision_function(X)\n assert isinstance(decision, np.ndarray)\n assert decision.shape == (n_samples, n_classes), (\n \"The shape of the decision function output for \"\n \"multioutput data is incorrect. Expected {}, got {}.\"\n .format((n_samples, n_classes), decision.shape))\n\n dec_pred = (decision > 0).astype(int)\n dec_exp = estimator.classes_[dec_pred]\n assert_array_equal(dec_exp, y_pred)\n\n if hasattr(estimator, \"predict_proba\"):\n y_prob = estimator.predict_proba(X)\n\n if isinstance(y_prob, list) and not tags['poor_score']:\n for i in range(n_classes):\n assert y_prob[i].shape == (n_samples, 2), (\n \"The shape of the probability for multioutput data is\"\n \" incorrect. Expected {}, got {}.\"\n .format((n_samples, 2), y_prob[i].shape))\n assert_array_equal(\n np.argmax(y_prob[i], axis=1).astype(int),\n y_pred[:, i]\n )\n elif not tags['poor_score']:\n assert y_prob.shape == (n_samples, n_classes), (\n \"The shape of the probability for multioutput data is\"\n \" incorrect. 
Expected {}, got {}.\"\n .format((n_samples, n_classes), y_prob.shape))\n assert_array_equal(y_prob.round().astype(int), y_pred)\n\n if (hasattr(estimator, \"decision_function\") and\n hasattr(estimator, \"predict_proba\")):\n for i in range(n_classes):\n y_proba = estimator.predict_proba(X)[:, i]\n y_decision = estimator.decision_function(X)\n assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressor_multioutput(name, estimator):\n estimator = clone(estimator)\n n_samples = n_features = 10\n\n if not _is_pairwise_metric(estimator):\n n_samples = n_samples + 1\n\n X, y = make_regression(random_state=42, n_targets=5,\n n_samples=n_samples, n_features=n_features)\n X = _pairwise_estimator_convert_X(X, estimator)\n\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n assert y_pred.dtype == np.dtype('float64'), (\n \"Multioutput predictions by a regressor are expected to be\"\n \" floating-point precision. Got {} instead\".format(y_pred.dtype))\n assert y_pred.shape == y.shape, (\n \"The shape of the prediction for multioutput data is incorrect.\"\n \" Expected {}, got {}.\")\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_clustering(name, clusterer_orig, readonly_memmap=False):\n clusterer = clone(clusterer_orig)\n X, y = make_blobs(n_samples=50, random_state=1)\n X, y = shuffle(X, y, random_state=7)\n X = StandardScaler().fit_transform(X)\n rng = np.random.RandomState(7)\n X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])\n\n if readonly_memmap:\n X, y, X_noise = create_memmap_backed_data([X, y, X_noise])\n\n n_samples, n_features = X.shape\n # catch deprecation and neighbors warnings\n if hasattr(clusterer, \"n_clusters\"):\n clusterer.set_params(n_clusters=3)\n set_random_state(clusterer)\n if name == 'AffinityPropagation':\n clusterer.set_params(preference=-100)\n clusterer.set_params(max_iter=100)\n\n # fit\n clusterer.fit(X)\n # with lists\n clusterer.fit(X.tolist())\n\n pred = clusterer.labels_\n assert pred.shape == (n_samples,)\n assert adjusted_rand_score(pred, y) > 0.4\n if _safe_tags(clusterer, key='non_deterministic'):\n return\n set_random_state(clusterer)\n with warnings.catch_warnings(record=True):\n pred2 = clusterer.fit_predict(X)\n assert_array_equal(pred, pred2)\n\n # fit_predict(X) and labels_ should be of type int\n assert pred.dtype in [np.dtype('int32'), np.dtype('int64')]\n assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')]\n\n # Add noise to X to test the possible values of the labels\n labels = clusterer.fit_predict(X_noise)\n\n # There should be at least one sample in every cluster. 
Equivalently\n # labels_ should contain all the consecutive values between its\n # min and its max.\n labels_sorted = np.unique(labels)\n assert_array_equal(labels_sorted, np.arange(labels_sorted[0],\n labels_sorted[-1] + 1))\n\n # Labels are expected to start at 0 (no noise) or -1 (if noise)\n assert labels_sorted[0] in [0, -1]\n # Labels should be less than n_clusters - 1\n if hasattr(clusterer, 'n_clusters'):\n n_clusters = getattr(clusterer, 'n_clusters')\n assert n_clusters - 1 >= labels_sorted[-1]\n # else labels should be less than max(labels_) which is necessarily true\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_clusterer_compute_labels_predict(name, clusterer_orig):\n \"\"\"Check that predict is invariant of compute_labels.\"\"\"\n X, y = make_blobs(n_samples=20, random_state=0)\n clusterer = clone(clusterer_orig)\n set_random_state(clusterer)\n\n if hasattr(clusterer, \"compute_labels\"):\n # MiniBatchKMeans\n X_pred1 = clusterer.fit(X).predict(X)\n clusterer.set_params(compute_labels=False)\n X_pred2 = clusterer.fit(X).predict(X)\n assert_array_equal(X_pred1, X_pred2)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifiers_one_label(name, classifier_orig):\n error_string_fit = \"Classifier can't train when only one class is present.\"\n error_string_predict = (\"Classifier can't predict when only one class is \"\n \"present.\")\n rnd = np.random.RandomState(0)\n X_train = rnd.uniform(size=(10, 3))\n X_test = rnd.uniform(size=(10, 3))\n y = np.ones(10)\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n classifier = clone(classifier_orig)\n with raises(\n ValueError, match=\"class\", may_pass=True, err_msg=error_string_fit\n ) as cm:\n classifier.fit(X_train, y)\n\n if cm.raised_and_matched:\n # ValueError was raised with proper error message\n return\n\n assert_array_equal(\n classifier.predict(X_test), y, err_msg=error_string_predict\n )\n\n\n@ignore_warnings # Warnings are raised by decision function\ndef check_classifiers_train(\n name, classifier_orig, readonly_memmap=False, X_dtype=\"float64\"\n):\n X_m, y_m = make_blobs(n_samples=300, random_state=0)\n X_m = X_m.astype(X_dtype)\n X_m, y_m = shuffle(X_m, y_m, random_state=7)\n X_m = StandardScaler().fit_transform(X_m)\n # generate binary problem from multi-class one\n y_b = y_m[y_m != 2]\n X_b = X_m[y_m != 2]\n\n if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB',\n 'CategoricalNB']:\n X_m -= X_m.min()\n X_b -= X_b.min()\n\n if readonly_memmap:\n X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])\n\n problems = [(X_b, y_b)]\n tags = _safe_tags(classifier_orig)\n if not tags['binary_only']:\n problems.append((X_m, y_m))\n\n for (X, y) in problems:\n classes = np.unique(y)\n n_classes = len(classes)\n n_samples, n_features = X.shape\n classifier = clone(classifier_orig)\n X = _pairwise_estimator_convert_X(X, classifier)\n y = _enforce_estimator_tags_y(classifier, y)\n\n set_random_state(classifier)\n # raises error on malformed input for fit\n if not tags[\"no_validation\"]:\n with raises(\n ValueError,\n err_msg=f\"The classifier {name} does not raise an error when \"\n \"incorrect/malformed input data for fit is passed. The number \"\n \"of training examples is not the same as the number of \"\n \"labels. 
Perhaps use check_X_y in fit.\",\n ):\n classifier.fit(X, y[:-1])\n\n # fit\n classifier.fit(X, y)\n # with lists\n classifier.fit(X.tolist(), y.tolist())\n assert hasattr(classifier, \"classes_\")\n y_pred = classifier.predict(X)\n\n assert y_pred.shape == (n_samples,)\n # training set performance\n if not tags['poor_score']:\n assert accuracy_score(y, y_pred) > 0.83\n\n # raises error on malformed input for predict\n msg_pairwise = (\n \"The classifier {} does not raise an error when shape of X in \"\n \" {} is not equal to (n_test_samples, n_training_samples)\")\n msg = (\"The classifier {} does not raise an error when the number of \"\n \"features in {} is different from the number of features in \"\n \"fit.\")\n\n if not tags[\"no_validation\"]:\n if _is_pairwise(classifier):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(name, \"predict\"),\n ):\n classifier.predict(X.reshape(-1, 1))\n else:\n with raises(ValueError, err_msg=msg.format(name, \"predict\")):\n classifier.predict(X.T)\n if hasattr(classifier, \"decision_function\"):\n try:\n # decision_function agrees with predict\n decision = classifier.decision_function(X)\n if n_classes == 2:\n if not tags[\"multioutput_only\"]:\n assert decision.shape == (n_samples,)\n else:\n assert decision.shape == (n_samples, 1)\n dec_pred = (decision.ravel() > 0).astype(int)\n assert_array_equal(dec_pred, y_pred)\n else:\n assert decision.shape == (n_samples, n_classes)\n assert_array_equal(np.argmax(decision, axis=1), y_pred)\n\n # raises error on malformed input for decision_function\n if not tags[\"no_validation\"]:\n if _is_pairwise(classifier):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(\n name, \"decision_function\"\n ),\n ):\n classifier.decision_function(X.reshape(-1, 1))\n else:\n with raises(\n ValueError,\n err_msg=msg.format(name, \"decision_function\"),\n ):\n classifier.decision_function(X.T)\n except NotImplementedError:\n pass\n\n if hasattr(classifier, \"predict_proba\"):\n # predict_proba agrees with predict\n y_prob = classifier.predict_proba(X)\n assert y_prob.shape == (n_samples, n_classes)\n assert_array_equal(np.argmax(y_prob, axis=1), y_pred)\n # check that probas for all classes sum to one\n assert_array_almost_equal(np.sum(y_prob, axis=1),\n np.ones(n_samples))\n if not tags[\"no_validation\"]:\n # raises error on malformed input for predict_proba\n if _is_pairwise(classifier_orig):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(name, \"predict_proba\"),\n ):\n classifier.predict_proba(X.reshape(-1, 1))\n else:\n with raises(\n ValueError,\n err_msg=msg.format(name, \"predict_proba\"),\n ):\n classifier.predict_proba(X.T)\n if hasattr(classifier, \"predict_log_proba\"):\n # predict_log_proba is a transformation of predict_proba\n y_log_prob = classifier.predict_log_proba(X)\n assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)\n assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))\n\n\ndef check_outlier_corruption(num_outliers, expected_outliers, decision):\n # Check for deviation from the precise given contamination level that may\n # be due to ties in the anomaly scores.\n if num_outliers < expected_outliers:\n start = num_outliers\n end = expected_outliers + 1\n else:\n start = expected_outliers\n end = num_outliers + 1\n\n # ensure that all values in the 'critical area' are tied,\n # leading to the observed discrepancy between provided\n # and actual contamination levels.\n sorted_decision = np.sort(decision)\n msg = ('The number of predicted outliers is not 
equal to the expected '\n 'number of outliers and this difference is not explained by the '\n 'number of ties in the decision_function values')\n assert len(np.unique(sorted_decision[start:end])) == 1, msg\n\n\ndef check_outliers_train(name, estimator_orig, readonly_memmap=True):\n n_samples = 300\n X, _ = make_blobs(n_samples=n_samples, random_state=0)\n X = shuffle(X, random_state=7)\n\n if readonly_memmap:\n X = create_memmap_backed_data(X)\n\n n_samples, n_features = X.shape\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n\n # fit\n estimator.fit(X)\n # with lists\n estimator.fit(X.tolist())\n\n y_pred = estimator.predict(X)\n assert y_pred.shape == (n_samples,)\n assert y_pred.dtype.kind == 'i'\n assert_array_equal(np.unique(y_pred), np.array([-1, 1]))\n\n decision = estimator.decision_function(X)\n scores = estimator.score_samples(X)\n for output in [decision, scores]:\n assert output.dtype == np.dtype('float')\n assert output.shape == (n_samples,)\n\n # raises error on malformed input for predict\n with raises(ValueError):\n estimator.predict(X.T)\n\n # decision_function agrees with predict\n dec_pred = (decision >= 0).astype(int)\n dec_pred[dec_pred == 0] = -1\n assert_array_equal(dec_pred, y_pred)\n\n # raises error on malformed input for decision_function\n with raises(ValueError):\n estimator.decision_function(X.T)\n\n # decision_function is a translation of score_samples\n y_dec = scores - estimator.offset_\n assert_allclose(y_dec, decision)\n\n # raises error on malformed input for score_samples\n with raises(ValueError):\n estimator.score_samples(X.T)\n\n # contamination parameter (not for OneClassSVM which has the nu parameter)\n if (hasattr(estimator, 'contamination')\n and not hasattr(estimator, 'novelty')):\n # proportion of outliers equal to contamination parameter when not\n # set to 'auto'. This is true for the training set and cannot thus be\n # checked as follows for estimators with a novelty parameter such as\n # LocalOutlierFactor (tested in check_outliers_fit_predict)\n expected_outliers = 30\n contamination = expected_outliers / n_samples\n estimator.set_params(contamination=contamination)\n estimator.fit(X)\n y_pred = estimator.predict(X)\n\n num_outliers = np.sum(y_pred != 1)\n # num_outliers should be equal to expected_outliers unless\n # there are ties in the decision_function values. this can\n # only be tested for estimators with a decision_function\n # method, i.e. 
all estimators except LOF which is already\n # excluded from this if branch.\n if num_outliers != expected_outliers:\n decision = estimator.decision_function(X)\n check_outlier_corruption(num_outliers, expected_outliers, decision)\n\n # raises error when contamination is a scalar and not in [0,1]\n for contamination in [-0.5, 2.3]:\n estimator.set_params(contamination=contamination)\n with raises(ValueError):\n estimator.fit(X)\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_classifiers_multilabel_representation_invariance(\n name, classifier_orig\n):\n\n X, y = make_multilabel_classification(n_samples=100, n_features=20,\n n_classes=5, n_labels=3,\n length=50, allow_unlabeled=True,\n random_state=0)\n\n X_train, y_train = X[:80], y[:80]\n X_test = X[80:]\n\n y_train_list_of_lists = y_train.tolist()\n y_train_list_of_arrays = list(y_train)\n\n classifier = clone(classifier_orig)\n set_random_state(classifier)\n\n y_pred = classifier.fit(X_train, y_train).predict(X_test)\n\n y_pred_list_of_lists = classifier.fit(\n X_train, y_train_list_of_lists).predict(X_test)\n\n y_pred_list_of_arrays = classifier.fit(\n X_train, y_train_list_of_arrays).predict(X_test)\n\n assert_array_equal(y_pred, y_pred_list_of_arrays)\n assert_array_equal(y_pred, y_pred_list_of_lists)\n\n assert y_pred.dtype == y_pred_list_of_arrays.dtype\n assert y_pred.dtype == y_pred_list_of_lists.dtype\n assert type(y_pred) == type(y_pred_list_of_arrays)\n assert type(y_pred) == type(y_pred_list_of_lists)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_fit_returns_self(\n name, estimator_orig, readonly_memmap=False\n):\n \"\"\"Check if self is returned when calling fit.\"\"\"\n X, y = make_blobs(random_state=0, n_samples=21)\n # some want non-negative input\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if readonly_memmap:\n X, y = create_memmap_backed_data([X, y])\n\n set_random_state(estimator)\n assert estimator.fit(X, y) is estimator\n\n\n@ignore_warnings\ndef check_estimators_unfitted(name, estimator_orig):\n \"\"\"Check that predict raises an exception in an unfitted estimator.\n\n Unfitted estimators should raise a NotFittedError.\n \"\"\"\n # Common test for Regressors, Classifiers and Outlier detection estimators\n X, y = _regression_dataset()\n\n estimator = clone(estimator_orig)\n for method in ('decision_function', 'predict', 'predict_proba',\n 'predict_log_proba'):\n if hasattr(estimator, method):\n with raises(NotFittedError):\n getattr(estimator, method)(X)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_supervised_y_2d(name, estimator_orig):\n tags = _safe_tags(estimator_orig)\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = _pairwise_estimator_convert_X(\n rnd.uniform(size=(n_samples, 3)), estimator_orig\n )\n y = np.arange(n_samples) % 3\n y = _enforce_estimator_tags_y(estimator_orig, y)\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n # fit\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n set_random_state(estimator)\n # Check that when a 2D y is given, a DataConversionWarning is\n # raised\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\", DataConversionWarning)\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n estimator.fit(X, y[:, np.newaxis])\n y_pred_2d = estimator.predict(X)\n msg = \"expected 1 DataConversionWarning, got: %s\" % (\n \", \".join([str(w_x) for w_x in w]))\n if not 
tags['multioutput']:\n # check that we warned if we don't support multi-output\n assert len(w) > 0, msg\n assert \"DataConversionWarning('A column-vector y\" \\\n \" was passed when a 1d array was expected\" in msg\n assert_allclose(y_pred.ravel(), y_pred_2d.ravel())\n\n\n@ignore_warnings\ndef check_classifiers_predictions(X, y, name, classifier_orig):\n classes = np.unique(y)\n classifier = clone(classifier_orig)\n if name == 'BernoulliNB':\n X = X > X.mean()\n set_random_state(classifier)\n\n classifier.fit(X, y)\n y_pred = classifier.predict(X)\n\n if hasattr(classifier, \"decision_function\"):\n decision = classifier.decision_function(X)\n assert isinstance(decision, np.ndarray)\n if len(classes) == 2:\n dec_pred = (decision.ravel() > 0).astype(int)\n dec_exp = classifier.classes_[dec_pred]\n assert_array_equal(dec_exp, y_pred,\n err_msg=\"decision_function does not match \"\n \"classifier for %r: expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, dec_exp)),\n \", \".join(map(str, y_pred))))\n elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':\n decision_y = np.argmax(decision, axis=1).astype(int)\n y_exp = classifier.classes_[decision_y]\n assert_array_equal(y_exp, y_pred,\n err_msg=\"decision_function does not match \"\n \"classifier for %r: expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, y_exp)),\n \", \".join(map(str, y_pred))))\n\n # training set performance\n if name != \"ComplementNB\":\n # This is a pathological data set for ComplementNB.\n # For some specific cases 'ComplementNB' predicts less classes\n # than expected\n assert_array_equal(np.unique(y), np.unique(y_pred))\n assert_array_equal(classes, classifier.classes_,\n err_msg=\"Unexpected classes_ attribute for %r: \"\n \"expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, classes)),\n \", \".join(map(str, classifier.classes_))))\n\n\ndef _choose_check_classifiers_labels(name, y, y_names):\n # Semisupervised classifers use -1 as the indicator for an unlabeled\n # sample.\n return y if name in [\"LabelPropagation\",\n \"LabelSpreading\",\n \"SelfTrainingClassifier\"] else y_names\n\n\ndef check_classifiers_classes(name, classifier_orig):\n X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0,\n cluster_std=0.1)\n X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,\n random_state=7)\n X_multiclass = StandardScaler().fit_transform(X_multiclass)\n # We need to make sure that we have non negative data, for things\n # like NMF\n X_multiclass -= X_multiclass.min() - .1\n\n X_binary = X_multiclass[y_multiclass != 2]\n y_binary = y_multiclass[y_multiclass != 2]\n\n X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig)\n X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig)\n\n labels_multiclass = [\"one\", \"two\", \"three\"]\n labels_binary = [\"one\", \"two\"]\n\n y_names_multiclass = np.take(labels_multiclass, y_multiclass)\n y_names_binary = np.take(labels_binary, y_binary)\n\n problems = [(X_binary, y_binary, y_names_binary)]\n if not _safe_tags(classifier_orig, key='binary_only'):\n problems.append((X_multiclass, y_multiclass, y_names_multiclass))\n\n for X, y, y_names in problems:\n for y_names_i in [y_names, y_names.astype('O')]:\n y_ = _choose_check_classifiers_labels(name, y, y_names_i)\n check_classifiers_predictions(X, y_, name, classifier_orig)\n\n labels_binary = [-1, 1]\n y_names_binary = np.take(labels_binary, y_binary)\n y_binary = _choose_check_classifiers_labels(name, y_binary, 
y_names_binary)\n check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressors_int(name, regressor_orig):\n X, _ = _regression_dataset()\n X = _pairwise_estimator_convert_X(X[:50], regressor_orig)\n rnd = np.random.RandomState(0)\n y = rnd.randint(3, size=X.shape[0])\n y = _enforce_estimator_tags_y(regressor_orig, y)\n rnd = np.random.RandomState(0)\n # separate estimators to control random seeds\n regressor_1 = clone(regressor_orig)\n regressor_2 = clone(regressor_orig)\n set_random_state(regressor_1)\n set_random_state(regressor_2)\n\n if name in CROSS_DECOMPOSITION:\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n # fit\n regressor_1.fit(X, y_)\n pred1 = regressor_1.predict(X)\n regressor_2.fit(X, y_.astype(float))\n pred2 = regressor_2.predict(X)\n assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressors_train(\n name, regressor_orig, readonly_memmap=False, X_dtype=np.float64\n):\n X, y = _regression_dataset()\n X = X.astype(X_dtype)\n X = _pairwise_estimator_convert_X(X, regressor_orig)\n y = scale(y) # X is already scaled\n regressor = clone(regressor_orig)\n y = _enforce_estimator_tags_y(regressor, y)\n if name in CROSS_DECOMPOSITION:\n rnd = np.random.RandomState(0)\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n if readonly_memmap:\n X, y, y_ = create_memmap_backed_data([X, y, y_])\n\n if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):\n # linear regressors need to set alpha, but not generalized CV ones\n regressor.alpha = 0.01\n if name == 'PassiveAggressiveRegressor':\n regressor.C = 0.01\n\n # raises error on malformed input for fit\n with raises(\n ValueError,\n err_msg=f\"The classifier {name} does not raise an error when \"\n \"incorrect/malformed input data for fit is passed. The number of \"\n \"training examples is not the same as the number of labels. Perhaps \"\n \"use check_X_y in fit.\",\n ):\n regressor.fit(X, y[:-1])\n # fit\n set_random_state(regressor)\n regressor.fit(X, y_)\n regressor.fit(X.tolist(), y_.tolist())\n y_pred = regressor.predict(X)\n assert y_pred.shape == y_.shape\n\n # TODO: find out why PLS and CCA fail. 
RANSAC is random\n # and furthermore assumes the presence of outliers, hence\n # skipped\n if not _safe_tags(regressor, key=\"poor_score\"):\n assert regressor.score(X, y_) > 0.5\n\n\n@ignore_warnings\ndef check_regressors_no_decision_function(name, regressor_orig):\n # check that regressors don't have a decision_function, predict_proba, or\n # predict_log_proba method.\n rng = np.random.RandomState(0)\n regressor = clone(regressor_orig)\n\n X = rng.normal(size=(10, 4))\n X = _pairwise_estimator_convert_X(X, regressor_orig)\n y = _enforce_estimator_tags_y(regressor, X[:, 0])\n\n regressor.fit(X, y)\n funcs = [\"decision_function\", \"predict_proba\", \"predict_log_proba\"]\n for func_name in funcs:\n assert not hasattr(regressor, func_name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_classifiers(name, classifier_orig):\n\n if _safe_tags(classifier_orig, key='binary_only'):\n problems = [2]\n else:\n problems = [2, 3]\n\n for n_centers in problems:\n # create a very noisy dataset\n X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n\n # can't use gram_if_pairwise() here, setting up gram matrix manually\n if _is_pairwise(classifier_orig):\n X_test = rbf_kernel(X_test, X_train)\n X_train = rbf_kernel(X_train, X_train)\n\n n_centers = len(np.unique(y_train))\n\n if n_centers == 2:\n class_weight = {0: 1000, 1: 0.0001}\n else:\n class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}\n\n classifier = clone(classifier_orig).set_params(\n class_weight=class_weight)\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n if hasattr(classifier, \"min_weight_fraction_leaf\"):\n classifier.set_params(min_weight_fraction_leaf=0.01)\n if hasattr(classifier, \"n_iter_no_change\"):\n classifier.set_params(n_iter_no_change=20)\n if hasattr(classifier, \"hidden_layer_sizes\"):\n raise SkipTest\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n # XXX: Generally can use 0.89 here. 
On Windows, LinearSVC gets\n # 0.88 (Issue #9111)\n if not _safe_tags(classifier_orig, key='poor_score'):\n assert np.mean(y_pred == 0) > 0.87\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_balanced_classifiers(\n name, classifier_orig, X_train, y_train, X_test, y_test, weights\n):\n classifier = clone(classifier_orig)\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n\n classifier.set_params(class_weight='balanced')\n classifier.fit(X_train, y_train)\n y_pred_balanced = classifier.predict(X_test)\n assert (f1_score(y_test, y_pred_balanced, average='weighted') >\n f1_score(y_test, y_pred, average='weighted'))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_balanced_linear_classifier(name, Classifier):\n \"\"\"Test class weights with non-contiguous class labels.\"\"\"\n # this is run on classes, not instances, though this should be changed\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],\n [1.0, 1.0], [1.0, 0.0]])\n y = np.array([1, 1, 1, -1, -1])\n\n classifier = Classifier()\n\n if hasattr(classifier, \"n_iter\"):\n # This is a very small dataset, default n_iter are likely to prevent\n # convergence\n classifier.set_params(n_iter=1000)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n if hasattr(classifier, 'cv'):\n classifier.set_params(cv=3)\n set_random_state(classifier)\n\n # Let the model compute the class frequencies\n classifier.set_params(class_weight='balanced')\n coef_balanced = classifier.fit(X, y).coef_.copy()\n\n # Count each label occurrence to reweight manually\n n_samples = len(y)\n n_classes = float(len(np.unique(y)))\n\n class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),\n -1: n_samples / (np.sum(y == -1) * n_classes)}\n classifier.set_params(class_weight=class_weight)\n coef_manual = classifier.fit(X, y).coef_.copy()\n\n assert_allclose(coef_balanced, coef_manual,\n err_msg=\"Classifier %s is not computing\"\n \" class_weight=balanced properly.\"\n % name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_overwrite_params(name, estimator_orig):\n X, y = make_blobs(random_state=0, n_samples=21)\n # some want non-negative input\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n set_random_state(estimator)\n\n # Make a physical copy of the original estimator parameters before fitting.\n params = estimator.get_params()\n original_params = deepcopy(params)\n\n # Fit the model\n estimator.fit(X, y)\n\n # Compare the state of the model parameters with the original parameters\n new_params = estimator.get_params()\n for param_name, original_value in original_params.items():\n new_value = new_params[param_name]\n\n # We should never change or mutate the internal state of input\n # parameters by default. 
To check this we use the joblib.hash function\n # that introspects recursively any subobjects to compute a checksum.\n # The only exception to this rule of immutable constructor parameters\n # is possible RandomState instance but in this check we explicitly\n # fixed the random_state params recursively to be integer seeds.\n assert joblib.hash(new_value) == joblib.hash(original_value), (\n \"Estimator %s should not change or mutate \"\n \" the parameter %s from %s to %s during fit.\"\n % (name, param_name, original_value, new_value))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_no_attributes_set_in_init(name, estimator_orig):\n \"\"\"Check setting during init.\"\"\"\n try:\n # Clone fails if the estimator does not store\n # all parameters as an attribute during init\n estimator = clone(estimator_orig)\n except AttributeError:\n raise AttributeError(f\"Estimator {name} should store all \"\n \"parameters as an attribute during init.\")\n\n if hasattr(type(estimator).__init__, \"deprecated_original\"):\n return\n\n init_params = _get_args(type(estimator).__init__)\n if IS_PYPY:\n # __init__ signature has additional objects in PyPy\n for key in ['obj']:\n if key in init_params:\n init_params.remove(key)\n parents_init_params = [param for params_parent in\n (_get_args(parent) for parent in\n type(estimator).__mro__)\n for param in params_parent]\n\n # Test for no setting apart from parameters during init\n invalid_attr = (set(vars(estimator)) - set(init_params)\n - set(parents_init_params))\n assert not invalid_attr, (\n \"Estimator %s should not set any attribute apart\"\n \" from parameters during init. Found attributes %s.\"\n % (name, sorted(invalid_attr)))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sparsify_coefficients(name, estimator_orig):\n X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],\n [-1, -2], [2, 2], [-2, -2]])\n y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])\n y = _enforce_estimator_tags_y(estimator_orig, y)\n est = clone(estimator_orig)\n\n est.fit(X, y)\n pred_orig = est.predict(X)\n\n # test sparsify with dense inputs\n est.sparsify()\n assert sparse.issparse(est.coef_)\n pred = est.predict(X)\n assert_array_equal(pred, pred_orig)\n\n # pickle and unpickle with sparse coef_\n est = pickle.loads(pickle.dumps(est))\n assert sparse.issparse(est.coef_)\n pred = est.predict(X)\n assert_array_equal(pred, pred_orig)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifier_data_not_an_array(name, estimator_orig):\n X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1],\n [0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]])\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])\n y = _enforce_estimator_tags_y(estimator_orig, y)\n for obj_type in [\"NotAnArray\", \"PandasDataframe\"]:\n check_estimators_data_not_an_array(name, estimator_orig, X, y,\n obj_type)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressor_data_not_an_array(name, estimator_orig):\n X, y = _regression_dataset()\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = _enforce_estimator_tags_y(estimator_orig, y)\n for obj_type in [\"NotAnArray\", \"PandasDataframe\"]:\n check_estimators_data_not_an_array(name, estimator_orig, X, y,\n obj_type)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type):\n if name in CROSS_DECOMPOSITION:\n raise SkipTest(\"Skipping check_estimators_data_not_an_array \"\n \"for cross 
decomposition module as estimators \"\n \"are not deterministic.\")\n # separate estimators to control random seeds\n estimator_1 = clone(estimator_orig)\n estimator_2 = clone(estimator_orig)\n set_random_state(estimator_1)\n set_random_state(estimator_2)\n\n if obj_type not in [\"NotAnArray\", 'PandasDataframe']:\n raise ValueError(\"Data type {0} not supported\".format(obj_type))\n\n if obj_type == \"NotAnArray\":\n y_ = _NotAnArray(np.asarray(y))\n X_ = _NotAnArray(np.asarray(X))\n else:\n # Here pandas objects (Series and DataFrame) are tested explicitly\n # because some estimators may handle them (especially their indexing)\n # specially.\n try:\n import pandas as pd\n y_ = np.asarray(y)\n if y_.ndim == 1:\n y_ = pd.Series(y_)\n else:\n y_ = pd.DataFrame(y_)\n X_ = pd.DataFrame(np.asarray(X))\n\n except ImportError:\n raise SkipTest(\"pandas is not installed: not checking estimators \"\n \"for pandas objects.\")\n\n # fit\n estimator_1.fit(X_, y_)\n pred1 = estimator_1.predict(X_)\n estimator_2.fit(X, y)\n pred2 = estimator_2.predict(X)\n assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)\n\n\ndef check_parameters_default_constructible(name, Estimator):\n # test default-constructibility\n # get rid of deprecation warnings\n\n Estimator = Estimator.__class__\n\n with ignore_warnings(category=FutureWarning):\n estimator = _construct_instance(Estimator)\n # test cloning\n clone(estimator)\n # test __repr__\n repr(estimator)\n # test that set_params returns self\n assert estimator.set_params() is estimator\n\n # test if init does nothing but set parameters\n # this is important for grid_search etc.\n # We get the default parameters from init and then\n # compare these against the actual values of the attributes.\n\n # this comes from getattr. Gets rid of deprecation decorator.\n init = getattr(estimator.__init__, 'deprecated_original',\n estimator.__init__)\n\n try:\n def param_filter(p):\n \"\"\"Identify hyper parameters of an estimator.\"\"\"\n return (p.name != 'self' and\n p.kind != p.VAR_KEYWORD and\n p.kind != p.VAR_POSITIONAL)\n\n init_params = [p for p in signature(init).parameters.values()\n if param_filter(p)]\n\n except (TypeError, ValueError):\n # init is not a python function.\n # true for mixins\n return\n params = estimator.get_params()\n # they can need a non-default argument\n init_params = init_params[len(getattr(\n estimator, '_required_parameters', [])):]\n\n for init_param in init_params:\n assert init_param.default != init_param.empty, (\n \"parameter %s for %s has no default value\"\n % (init_param.name, type(estimator).__name__))\n allowed_types = {\n str,\n int,\n float,\n bool,\n tuple,\n type(None),\n type,\n types.FunctionType,\n joblib.Memory,\n }\n # Any numpy numeric such as np.int32.\n allowed_types.update(np.core.numerictypes.allTypes.values())\n assert type(init_param.default) in allowed_types, (\n f\"Parameter '{init_param.name}' of estimator \"\n f\"'{Estimator.__name__}' is of type \"\n f\"{type(init_param.default).__name__} which is not \"\n f\"allowed. All init parameters have to be immutable to \"\n f\"make cloning possible. Therefore we restrict the set of \"\n f\"legal types to \"\n f\"{set(type.__name__ for type in allowed_types)}.\"\n )\n if init_param.name not in params.keys():\n # deprecated parameter, not in get_params\n assert init_param.default is None, (\n f\"Estimator parameter '{init_param.name}' of estimator \"\n f\"'{Estimator.__name__}' is not returned by get_params. 
\"\n f\"If it is deprecated, set its default value to None.\"\n )\n continue\n\n param_value = params[init_param.name]\n if isinstance(param_value, np.ndarray):\n assert_array_equal(param_value, init_param.default)\n else:\n failure_text = (\n f\"Parameter {init_param.name} was mutated on init. All \"\n f\"parameters must be stored unchanged.\"\n )\n if is_scalar_nan(param_value):\n # Allows to set default parameters to np.nan\n assert param_value is init_param.default, failure_text\n else:\n assert param_value == init_param.default, failure_text\n\n\ndef _enforce_estimator_tags_y(estimator, y):\n # Estimators with a `requires_positive_y` tag only accept strictly positive\n # data\n if _safe_tags(estimator, key=\"requires_positive_y\"):\n # Create strictly positive y. The minimal increment above 0 is 1, as\n # y could be of integer dtype.\n y += 1 + abs(y.min())\n # Estimators with a `binary_only` tag only accept up to two unique y values\n if _safe_tags(estimator, key=\"binary_only\") and y.size > 0:\n y = np.where(y == y.flat[0], y, y.flat[0] + 1)\n # Estimators in mono_output_task_error raise ValueError if y is of 1-D\n # Convert into a 2-D y for those estimators.\n if _safe_tags(estimator, key=\"multioutput_only\"):\n return np.reshape(y, (-1, 1))\n return y\n\n\ndef _enforce_estimator_tags_x(estimator, X):\n # Pairwise estimators only accept\n # X of shape (`n_samples`, `n_samples`)\n if _is_pairwise(estimator):\n X = X.dot(X.T)\n # Estimators with `1darray` in `X_types` tag only accept\n # X of shape (`n_samples`,)\n if '1darray' in _safe_tags(estimator, key='X_types'):\n X = X[:, 0]\n # Estimators with a `requires_positive_X` tag only accept\n # strictly positive data\n if _safe_tags(estimator, key='requires_positive_X'):\n X -= X.min()\n return X\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_non_transformer_estimators_n_iter(name, estimator_orig):\n # Test that estimators that are not transformers with a parameter\n # max_iter, return the attribute of n_iter_ at least 1.\n\n # These models are dependent on external solvers like\n # libsvm and accessing the iter parameter is non-trivial.\n # SelfTrainingClassifier does not perform an iteration if all samples are\n # labeled, hence n_iter_ = 0 is valid.\n not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',\n 'RidgeClassifier', 'SVC', 'RandomizedLasso',\n 'LogisticRegressionCV', 'LinearSVC',\n 'LogisticRegression', 'SelfTrainingClassifier']\n\n # Tested in test_transformer_n_iter\n not_run_check_n_iter += CROSS_DECOMPOSITION\n if name in not_run_check_n_iter:\n return\n\n # LassoLars stops early for the default alpha=1.0 the iris dataset.\n if name == 'LassoLars':\n estimator = clone(estimator_orig).set_params(alpha=0.)\n else:\n estimator = clone(estimator_orig)\n if hasattr(estimator, 'max_iter'):\n iris = load_iris()\n X, y_ = iris.data, iris.target\n y_ = _enforce_estimator_tags_y(estimator, y_)\n\n set_random_state(estimator, 0)\n\n estimator.fit(X, y_)\n\n assert estimator.n_iter_ >= 1\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_n_iter(name, estimator_orig):\n # Test that transformers with a parameter max_iter, return the\n # attribute of n_iter_ at least 1.\n estimator = clone(estimator_orig)\n if hasattr(estimator, \"max_iter\"):\n if name in CROSS_DECOMPOSITION:\n # Check using default data\n X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]\n y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]\n\n else:\n X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 
1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X -= X.min() - 0.1\n set_random_state(estimator, 0)\n estimator.fit(X, y_)\n\n # These return a n_iter per component.\n if name in CROSS_DECOMPOSITION:\n for iter_ in estimator.n_iter_:\n assert iter_ >= 1\n else:\n assert estimator.n_iter_ >= 1\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_get_params_invariance(name, estimator_orig):\n # Checks if get_params(deep=False) is a subset of get_params(deep=True)\n e = clone(estimator_orig)\n\n shallow_params = e.get_params(deep=False)\n deep_params = e.get_params(deep=True)\n\n assert all(item in deep_params.items() for item in\n shallow_params.items())\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_set_params(name, estimator_orig):\n # Check that get_params() returns the same thing\n # before and after set_params() with some fuzz\n estimator = clone(estimator_orig)\n\n orig_params = estimator.get_params(deep=False)\n msg = \"get_params result does not match what was passed to set_params\"\n\n estimator.set_params(**orig_params)\n curr_params = estimator.get_params(deep=False)\n assert set(orig_params.keys()) == set(curr_params.keys()), msg\n for k, v in curr_params.items():\n assert orig_params[k] is v, msg\n\n # some fuzz values\n test_values = [-np.inf, np.inf, None]\n\n test_params = deepcopy(orig_params)\n for param_name in orig_params.keys():\n default_value = orig_params[param_name]\n for value in test_values:\n test_params[param_name] = value\n try:\n estimator.set_params(**test_params)\n except (TypeError, ValueError) as e:\n e_type = e.__class__.__name__\n # Exception occurred, possibly parameter validation\n warnings.warn(\"{0} occurred during set_params of param {1} on \"\n \"{2}. It is recommended to delay parameter \"\n \"validation until fit.\".format(e_type,\n param_name,\n name))\n\n change_warning_msg = \"Estimator's parameters changed after \" \\\n \"set_params raised {}\".format(e_type)\n params_before_exception = curr_params\n curr_params = estimator.get_params(deep=False)\n try:\n assert (set(params_before_exception.keys()) ==\n set(curr_params.keys()))\n for k, v in curr_params.items():\n assert params_before_exception[k] is v\n except AssertionError:\n warnings.warn(change_warning_msg)\n else:\n curr_params = estimator.get_params(deep=False)\n assert (set(test_params.keys()) ==\n set(curr_params.keys())), msg\n for k, v in curr_params.items():\n assert test_params[k] is v, msg\n test_params[param_name] = default_value\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifiers_regression_target(name, estimator_orig):\n # Check if classifier throws an exception when fed regression targets\n\n X, y = _regression_dataset()\n\n X = X + 1 + abs(X.min(axis=0)) # be sure that X is non-negative\n e = clone(estimator_orig)\n msg = \"Unknown label type: \"\n if not _safe_tags(e, key=\"no_validation\"):\n with raises(ValueError, match=msg):\n e.fit(X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_decision_proba_consistency(name, estimator_orig):\n # Check whether an estimator having both decision_function and\n # predict_proba methods has outputs with perfect rank correlation.\n\n centers = [(2, 2), (4, 4)]\n X, y = make_blobs(n_samples=100, random_state=0, n_features=4,\n centers=centers, cluster_std=1.0, shuffle=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n estimator = clone(estimator_orig)\n\n if (hasattr(estimator, \"decision_function\") and\n hasattr(estimator, 
\"predict_proba\")):\n\n estimator.fit(X_train, y_train)\n # Since the link function from decision_function() to predict_proba()\n # is sometimes not precise enough (typically expit), we round to the\n # 10th decimal to avoid numerical issues: we compare the rank\n # with deterministic ties rather than get platform specific rank\n # inversions in case of machine level differences.\n a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)\n b = estimator.decision_function(X_test).round(decimals=10)\n assert_array_equal(rankdata(a), rankdata(b))\n\n\ndef check_outliers_fit_predict(name, estimator_orig):\n # Check fit_predict for outlier detectors.\n\n n_samples = 300\n X, _ = make_blobs(n_samples=n_samples, random_state=0)\n X = shuffle(X, random_state=7)\n n_samples, n_features = X.shape\n estimator = clone(estimator_orig)\n\n set_random_state(estimator)\n\n y_pred = estimator.fit_predict(X)\n assert y_pred.shape == (n_samples,)\n assert y_pred.dtype.kind == 'i'\n assert_array_equal(np.unique(y_pred), np.array([-1, 1]))\n\n # check fit_predict = fit.predict when the estimator has both a predict and\n # a fit_predict method. recall that it is already assumed here that the\n # estimator has a fit_predict method\n if hasattr(estimator, 'predict'):\n y_pred_2 = estimator.fit(X).predict(X)\n assert_array_equal(y_pred, y_pred_2)\n\n if hasattr(estimator, \"contamination\"):\n # proportion of outliers equal to contamination parameter when not\n # set to 'auto'\n expected_outliers = 30\n contamination = float(expected_outliers)/n_samples\n estimator.set_params(contamination=contamination)\n y_pred = estimator.fit_predict(X)\n\n num_outliers = np.sum(y_pred != 1)\n # num_outliers should be equal to expected_outliers unless\n # there are ties in the decision_function values. this can\n # only be tested for estimators with a decision_function\n # method\n if (num_outliers != expected_outliers and\n hasattr(estimator, 'decision_function')):\n decision = estimator.decision_function(X)\n check_outlier_corruption(num_outliers, expected_outliers, decision)\n\n # raises error when contamination is a scalar and not in [0,1]\n for contamination in [-0.5, 2.3]:\n estimator.set_params(contamination=contamination)\n with raises(ValueError):\n estimator.fit_predict(X)\n\n\ndef check_fit_non_negative(name, estimator_orig):\n # Check that proper warning is raised for non-negative X\n # when tag requires_positive_X is present\n X = np.array([[-1., 1], [-1., 1]])\n y = np.array([1, 2])\n estimator = clone(estimator_orig)\n with raises(ValueError):\n estimator.fit(X, y)\n\n\ndef check_fit_idempotent(name, estimator_orig):\n # Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would\n # check that the estimated parameters during training (e.g. coefs_) are\n # the same, but having a universal comparison function for those\n # attributes is difficult and full of edge cases. 
So instead we check that\n # predict(), predict_proba(), decision_function() and transform() return\n # the same results.\n\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params().keys():\n estimator.set_params(warm_start=False)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n if is_regressor(estimator_orig):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X))\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n # Fit for the first time\n estimator.fit(X_train, y_train)\n\n result = {method: getattr(estimator, method)(X_test)\n for method in check_methods\n if hasattr(estimator, method)}\n\n # Fit again\n set_random_state(estimator)\n estimator.fit(X_train, y_train)\n\n for method in check_methods:\n if hasattr(estimator, method):\n new_result = getattr(estimator, method)(X_test)\n if np.issubdtype(new_result.dtype, np.floating):\n tol = 2*np.finfo(new_result.dtype).eps\n else:\n tol = 2*np.finfo(np.float64).eps\n assert_allclose_dense_sparse(\n result[method], new_result,\n atol=max(tol, 1e-9), rtol=max(tol, 1e-7),\n err_msg=\"Idempotency check failed for method {}\".format(method)\n )\n\n\ndef check_n_features_in(name, estimator_orig):\n # Make sure that n_features_in_ attribute doesn't exist until fit is\n # called, and that its value is correct.\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params():\n estimator.set_params(warm_start=False)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n if is_regressor(estimator_orig):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n assert not hasattr(estimator, 'n_features_in_')\n estimator.fit(X, y)\n if hasattr(estimator, 'n_features_in_'):\n assert estimator.n_features_in_ == X.shape[1]\n else:\n warnings.warn(\n \"As of scikit-learn 0.23, estimators should expose a \"\n \"n_features_in_ attribute, unless the 'no_validation' tag is \"\n \"True. This attribute should be equal to the number of features \"\n \"passed to the fit method. \"\n \"An error will be raised from version 1.0 (renaming of 0.25) \"\n \"when calling check_estimator(). \"\n \"See SLEP010: \"\n \"https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\", # noqa\n FutureWarning\n )\n\n\ndef check_requires_y_none(name, estimator_orig):\n # Make sure that an estimator with requires_y=True fails gracefully when\n # given y=None\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n\n warning_msg = (\"As of scikit-learn 0.23, estimators should have a \"\n \"'requires_y' tag set to the appropriate value. \"\n \"The default value of the tag is False. 
\"\n \"An error will be raised from version 1.0 when calling \"\n \"check_estimator() if the tag isn't properly set.\")\n\n expected_err_msgs = (\n \"requires y to be passed, but the target y is None\",\n \"Expected array-like (array or non-string sequence), got None\",\n \"y should be a 1d array\"\n )\n\n try:\n estimator.fit(X, None)\n except ValueError as ve:\n if not any(msg in str(ve) for msg in expected_err_msgs):\n warnings.warn(warning_msg, FutureWarning)\n\n\ndef check_n_features_in_after_fitting(name, estimator_orig):\n # Make sure that n_features_in are checked after fitting\n tags = _safe_tags(estimator_orig)\n\n if \"2darray\" not in tags[\"X_types\"] or tags[\"no_validation\"]:\n return\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params():\n estimator.set_params(warm_start=False)\n\n n_samples = 150\n X = rng.normal(size=(n_samples, 8))\n X = _enforce_estimator_tags_x(estimator, X)\n X = _pairwise_estimator_convert_X(X, estimator)\n\n if is_regressor(estimator):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y)\n assert estimator.n_features_in_ == X.shape[1]\n\n # check methods will check n_features_in_\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n X_bad = X[:, [1]]\n\n msg = (f\"X has 1 features, but \\\\w+ is expecting {X.shape[1]} \"\n \"features as input\")\n for method in check_methods:\n if not hasattr(estimator, method):\n continue\n with raises(ValueError, match=msg):\n getattr(estimator, method)(X_bad)\n\n # partial_fit will check in the second call\n if not hasattr(estimator, \"partial_fit\"):\n return\n\n estimator = clone(estimator_orig)\n if is_classifier(estimator):\n estimator.partial_fit(X, y, classes=np.unique(y))\n else:\n estimator.partial_fit(X, y)\n assert estimator.n_features_in_ == X.shape[1]\n\n with raises(ValueError, match=msg):\n estimator.partial_fit(X_bad, y)\n\n\ndef check_estimator_get_tags_default_keys(name, estimator_orig):\n # check that if _get_tags is implemented, it contains all keys from\n # _DEFAULT_KEYS\n estimator = clone(estimator_orig)\n if not hasattr(estimator, \"_get_tags\"):\n return\n\n tags_keys = set(estimator._get_tags().keys())\n default_tags_keys = set(_DEFAULT_TAGS.keys())\n assert tags_keys.intersection(default_tags_keys) == default_tags_keys, (\n f\"{name}._get_tags() is missing entries for the following default tags\"\n f\": {default_tags_keys - tags_keys.intersection(default_tags_keys)}\"\n )\n" ]
[ [ "numpy.mean", "numpy.where", "numpy.finfo", "numpy.sort", "scipy.stats.rankdata", "numpy.issubdtype", "numpy.dtype", "numpy.full", "numpy.empty", "numpy.log", "pandas.DataFrame", "numpy.core.numerictypes.allTypes.values", "numpy.take", "numpy.arange", "numpy.argmax", "numpy.random.sample", "scipy.sparse.csr_matrix", "numpy.vstack", "scipy.sparse.issparse", "numpy.array", "numpy.reshape", "numpy.argsort", "numpy.hstack", "numpy.asarray", "numpy.random.RandomState", "numpy.sum", "numpy.random.permutation", "numpy.ones", "numpy.ravel", "pandas.Series", "numpy.unique" ] ]
edwinmwiti/Robotic_Arm_Control
[ "471a8d095e6484f17fa6d27bc8420c66e859bd9c" ]
[ "main.py" ]
[ "\"\"\"\nIntelligent Robotic Servicing System\nMain Interface For the Robotic Arm Control Software\nAuthor: Edwin Mwiti\nCopyright: 2020\n\"\"\"\n\n# imports\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport Pmw\nfrom time import time, sleep\nfrom datetime import datetime\nimport os\nimport sys\nimport numericValidator\nimport tk_tools as tools\nfrom pylive import live_plotter\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport ArduinoConnection\n\n\nclass LoginForm(ttk.Frame):\n def __init__(self, parent):\n \"\"\"Constructor\"\"\"\n ttk.Frame.__init__(self, parent)\n self.parent = parent\n\n window_width = 470\n window_height = 150\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n\n x_position = int((screen_width/2) - (window_width/2))\n y_position = int((screen_height/2) - (window_height/2))\n\n self.parent.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x_position, y_position))\n\n self.parent.title(\"IRSS|Login\")\n self.parent.configure(background='gray40')\n\n self.widgeter()\n\n def widgeter(self, *args):\n \"\"\"Create widgets for login window\"\"\"\n self.parent.bind(\"<Return>\", self.verify_password_a)\n self.btnConf = {'font': ('verdana', 9), 'background': 'slategray', 'padx': '4', 'width': '8', 'pady': '4'} # button configurations\n self.logo = Label(self.parent, text=\"IRSS\", background='gray40', pady=10, font=('verdana', 13))\n\n self.password = Entry(self.parent, font=('verdana', 12), justify=\"center\")\n self.password.configure(show=\"*\")\n self.password.focus()\n self.filler = Label(self.parent, text=\"\",background='gray40', pady=10, font=('verdana', 13))\n self.login_btn = Button(self.parent, self.btnConf, text=\"Login\", pady=5, relief=RIDGE, command=self.verify_password) # mouse click\n\n # place the widgets\n self.logo.pack()\n self.password.pack()\n self.filler.pack()\n self.login_btn.pack()\n\n def verify_password(self, event=None):\n \"\"\"Verify the password\"\"\"\n # get the password\n if self.password.get() != \"r\":\n messagebox.showerror('IRSS', 'Access Denied')\n self.password.focus()\n else:\n # proceed to the main window\n self.initialize_main_application()\n\n def verify_password_a(self, event):\n \"\"\"Respond to enter key press\"\"\"\n self.verify_password()\n\n def initialize_main_application(self):\n \"\"\"Show the main window if password correct\"\"\"\n self.parent.destroy()\n self.parent = Tk()\n self.main = Main(self.parent)\n self.parent.mainloop()\n\n\nclass Main:\n def __init__(self, parent):\n self.parent = parent\n\n # view fullscreen\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n self.parent.geometry(\"{}x{}\".format(screen_width, screen_height))\n self.parent.title(\"IRSS | Control Software\")\n # self.parent.state('normal')\n self.parent.configure(background='gray40')\n\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\"Create Widgets for the main window\"\"\"\n\n # parent frame\n overall = Frame(self.parent, relief=RIDGE, background='gray40')\n overall.pack(side=LEFT, fill=BOTH)\n\n leftFrame = Frame(overall, relief=FLAT, background='gray40')\n leftFrame.pack(side=TOP, fill=Y)\n\n self.centerFrame = Frame(self.parent, relief=RIDGE, background='gray40')\n self.centerFrame.pack(side=LEFT, fill=Y)\n\n rightFrame = Frame(self.parent, relief=FLAT, background='gray30')\n rightFrame.pack(side=TOP, fill=Y)\n\n # Mode 
selection Frame\n self.mode = LabelFrame(leftFrame, text=\"Mode\", font=('arial', 11, 'bold'))\n self.mode.configure(background='gray40')\n self.mode.pack(anchor=W)\n self.mode_var = IntVar()\n\n self.manual_mode =Radiobutton(self.mode,\n text='Manual',\n value=1,\n variable=self.mode_var,\n indicatoron=0, fg='red',\n background='gray10',\n font=('verdana', 11, 'bold'),\n width=10,\n command=self.check_current_mode,\n selectcolor='lawngreen').pack(side=LEFT, padx=18, pady=5)\n\n self.intelligent_mode = Radiobutton(self.mode,\n text='Intelligent',\n value=2,\n variable=self.mode_var,\n indicatoron=0, fg='red',\n background='gray10',\n font=('verdana', 11, 'bold'),\n width=10,\n command=self.check_current_mode,\n selectcolor='lawngreen').pack(side=LEFT, padx=18, pady=5)\n\n self.remote_mode = Radiobutton(self.mode,\n text='Remote',\n value=3,\n variable=self.mode_var,\n indicatoron=0, fg='red',\n background='gray10',\n font=('verdana', 11, 'bold'),\n width=10,\n command=self.check_current_mode,\n selectcolor='lawngreen').pack(side=LEFT, padx=18, pady=5)\n\n # ==============================Pick coordinates frame================================================\n self.pick_group = LabelFrame(leftFrame, text=\"Pick Position\", font=('arial',11, 'bold'))\n self.pick_group.configure(background='gray40')\n self.pick_group.pack(anchor=W)\n\n Label(self.pick_group, font=('verdana', 11), text=\"Enter coordinates of the desired position of gripper center\", background='gray40').pack()\n\n # picking position coordinates entry frame\n pick_entry_frame = Frame(self.pick_group, relief=FLAT, background='gray40')\n pick_entry_frame.configure(background='gray40')\n\n x_coords = Frame(pick_entry_frame, relief=FLAT, background='gray40')\n x_coords.pack()\n x_label = Label(x_coords, text='x: ', font=('verdana', 11), background='gray40')\n self.x = numericValidator.NumericEntry(x_coords, font=('verdana', 11, 'bold'),background='gray70', width=10)\n x_label.pack(side=LEFT)\n self.x.pack(side=LEFT)\n\n y_coords = Frame(pick_entry_frame, relief=FLAT, background='gray40')\n y_coords.pack()\n y_label = Label(y_coords, text='y: ', font=('verdana', 11), background='gray40')\n self.y = numericValidator.NumericEntry(y_coords, font=('verdana', 11, 'bold'),background='gray70', width=10)\n y_label.pack(side=LEFT)\n self.y.pack(side=LEFT)\n\n z_coords = Frame(pick_entry_frame, relief=FLAT, background='gray40')\n z_coords.pack()\n z_label = Label(z_coords, text='z: ', font=('verdana', 11), background='gray40')\n self.z = numericValidator.NumericEntry(z_coords, font=('verdana', 11, 'bold'),background='gray70', width=10)\n z_label.pack(side=LEFT)\n self.z.pack(side=LEFT)\n\n # Inverse Kinematics computing button\n self.pick_ik = Button(pick_entry_frame, text=\"Compute IK\", borderwidth=1, font=('verdana', 11, 'bold'), relief=SOLID, background='gray10', fg='orange')\n\n self.pick_ik.pack(pady=5)\n\n pick_entry_frame.pack()\n\n # pick position joint angles\n Label(self.pick_group,font=('verdana', 11), text=\"Joint Angles (deg)\", background='gray40').pack(pady=5)\n joint_angles_frame = Frame(self.pick_group, relief=FLAT, background='gray40')\n\n shoulder_az = Frame(joint_angles_frame, relief=FLAT, background='gray40')\n shoulder_az.pack()\n Label(shoulder_az, text='Shoulder Azimuth:', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.shoulder_azimuth = numericValidator.NumericEntry(shoulder_az, width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.shoulder_azimuth.pack(side=LEFT)\n\n shoulder_pivot = 
Frame(joint_angles_frame, relief=FLAT, background='gray40')\n shoulder_pivot.pack()\n Label(shoulder_pivot, text='Shoulder Pivot: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.shoulder_pivot = numericValidator.NumericEntry(shoulder_pivot,width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.shoulder_pivot.pack(side=LEFT)\n\n elbow_p = Frame(joint_angles_frame, relief=FLAT, background='gray40')\n elbow_p.pack()\n Label(elbow_p, text='Elbow Pivot: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.elbow_pivot = numericValidator.NumericEntry(elbow_p,width=10, font=('verdana', 11, 'bold'),background='gray70')\n self.elbow_pivot.pack(side=LEFT)\n\n wrist_p = Frame(joint_angles_frame, relief=FLAT, background='gray40')\n wrist_p.pack()\n Label(wrist_p, text='Wrist Pitch: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.wrist_pivot = numericValidator.NumericEntry(wrist_p, width=10, font=('verdana', 11, 'bold'),background='gray70')\n self.wrist_pivot.pack(side=LEFT)\n\n joint_angles_frame.pack(side=TOP)\n\n # pick_x.component('entry').focus_set()\n # ===================================END================================================================\n\n # ===================================placing coordinates frame======================================\n self.place_group = LabelFrame(leftFrame, text='Place Position',font=('arial', 11, 'bold'), background='gray40')\n self.place_group.pack(anchor=W)\n\n Label(self.place_group, font=('verdana', 11), text=\"Enter coordinates of the desired position of gripper center\", background='gray40').pack()\n\n # picking position coordinates entry frame\n place_entry_frame = Frame(self.place_group, relief=FLAT)\n\n place_x_coords = Frame(place_entry_frame, relief=FLAT, background='gray40')\n place_x_coords.pack()\n place_x_label = Label(place_x_coords, text='x: ', font=('verdana', 11), background='gray40')\n self.place_x = numericValidator.NumericEntry(place_x_coords, font=('verdana', 11, 'bold'),background='gray70', width=10)\n place_x_label.pack(side=LEFT)\n self.place_x.pack(side=LEFT)\n\n place_y_coords = Frame(place_entry_frame, relief=FLAT, background='gray40')\n place_y_coords.pack()\n place_y_label = Label(place_y_coords, text='y: ', font=('verdana', 11), background='gray40')\n self.place_y = numericValidator.NumericEntry(place_y_coords,font=('verdana', 11, 'bold'),background='gray70', width=10)\n place_y_label.pack(side=LEFT)\n self.place_y.pack(side=LEFT)\n\n place_z_coords = Frame(place_entry_frame, relief=FLAT, background='gray40')\n place_z_coords.pack()\n place_z_label = Label(place_z_coords, text='z: ', font=('verdana', 11), background='gray40')\n self.place_z = numericValidator.NumericEntry(place_z_coords, font=('verdana', 11, 'bold'),background='gray70', width=10)\n place_z_label.pack(side=LEFT)\n self.place_z.pack(side=LEFT)\n\n # Inverse Kinematics computing button\n self.place_ik = Button(self.place_group, text=\"Compute IK\", borderwidth=1, font=('verdana', 11, 'bold'), relief=SOLID,\n background='gray10', fg='orange')\n\n place_entry_frame.pack()\n\n self.place_ik.pack(pady=5)\n\n # pick position joint angles\n Label(self.place_group, font=('verdana', 11), text=\"Joint Angles (deg)\", background='gray40').pack(pady=5)\n place_joint_angles = Frame(self.place_group, relief=FLAT, background='gray40')\n\n shoulder_az = Frame(place_joint_angles, relief=FLAT, background='gray40')\n shoulder_az.pack()\n Label(shoulder_az, text='Shoulder Azimuth:', font=('verdana', 11), 
background='gray40').pack(side=LEFT)\n self.shoulder_azimuth = numericValidator.NumericEntry(shoulder_az, width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.shoulder_azimuth.pack(side=LEFT)\n\n shoulder_pivot = Frame(place_joint_angles, relief=FLAT, background='gray40')\n shoulder_pivot.pack()\n Label(shoulder_pivot, text='Shoulder Pivot: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.shoulder_pivot = numericValidator.NumericEntry(shoulder_pivot, width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.shoulder_pivot.pack(side=LEFT)\n\n elbow_p = Frame(place_joint_angles, relief=FLAT, background='gray40')\n elbow_p.pack()\n Label(elbow_p, text='Elbow Pivot: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.elbow_pivot = numericValidator.NumericEntry(elbow_p, width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.elbow_pivot.pack(side=LEFT)\n\n wrist_p = Frame(place_joint_angles, relief=FLAT, background='gray40')\n wrist_p.pack()\n Label(wrist_p, text='Wrist Pitch: ', font=('verdana', 11), background='gray40').pack(side=LEFT)\n self.wrist_pivot = numericValidator.NumericEntry(wrist_p, width=10, font=('verdana', 11, 'bold'),background='gray70',)\n self.wrist_pivot.pack(side=LEFT)\n\n place_joint_angles.pack(side=TOP)\n # ===================================END====================================================================\n\n # ===================================Operation Buttons======================================================\n operations = Frame(leftFrame, relief=FLAT, background='gray40')\n operations.pack(side=LEFT, anchor=W, fill=Y)\n\n #===============================OPERATION BUTTONS================================================\n\n # Load Button\n self.load_parameters = Button(operations, text=' UPLOAD ', font=('lucida', 11, 'bold'), fg='red', width = 11, background='gray60', command=self.load_to_console)\n self.load_parameters.pack(padx=4,pady=10, side=LEFT)\n\n # run button\n self.btnRun = Button(operations, text='RUN ', font=('lucida', 11, 'bold'), fg='red', width = 10, background='gray60',command=self.run)\n self.btnRun.pack(padx=4,pady=10, side=LEFT)\n\n self.btnStop = Button(operations, text='STOP', font=('lucida', 11, 'bold'), fg='red', width=10, background='gray60',command=self.stop)\n self.btnStop.pack(padx=4,pady=10, side=LEFT)\n\n self.btnExit = Button(operations, text='EXIT', font=('lucida', 11, 'bold'), fg='red', width=10, background='gray60',command=self.exit)\n self.btnExit.pack(padx=4,pady=10, side=LEFT)\n # =================================END========================================================================\n\n # ==================================No of objects to be picked================================================\n\n parametersFrame = LabelFrame(self.centerFrame, background='gray40', text='Operation Parameters', font=('arial', 11, 'bold'))\n parametersFrame.pack(side=TOP)\n\n objects = Frame(parametersFrame, background='gray40')\n objectsLabel = Label(objects, text='No. 
of Objects: ', font=('verdana', 11), background='gray40')\n self.objectsEntry = numericValidator.NumericEntry(objects, background='gray70', font=('verdana', 11, 'bold'), width=20)\n objectsLabel.pack(side=LEFT)\n self.objectsEntry.pack(side=LEFT, pady=4)\n\n objects.pack(side=TOP)\n\n servoFrame = Frame(parametersFrame, background='gray40')\n servoLabel = Label(servoFrame, text=\"Servo Speed: \", font=('verdana', 11), background='gray40')\n # servoEntry = Entry(servoFrame, background='gray70', width=10)\n self.servoEntry = Pmw.Counter(servoFrame,\n entry_width=30,\n entryfield_value='12.5',\n datatype={'counter':'real', 'separator':'.'},\n entryfield_validate={'validator':'real', 'min':0.0, 'max':15.0, 'separator':'.'},\n increment=.2,\n\n )\n\n servoLabel.pack(side=LEFT)\n self.servoEntry.pack(side=LEFT, fill=Y, pady=4)\n servoFrame.pack(side=LEFT)\n\n # ==================================END======================================================================\n\n # status bar\n sbar = Frame(overall, relief=SUNKEN, background='gray40')\n self.statusbar = Label(sbar, text='Status:', bd=1, anchor=W, font=('consolas', 13, 'bold'))\n self.statusbar.configure(background='gray40', fg='lawngreen')\n self.cstat = Label(sbar, text='...', fg='lawngreen', background='gray40', font=('consolas', 13, 'bold'))\n\n self.statusbar.pack(anchor=W, side=LEFT)\n self.cstat.pack(anchor=W, side=LEFT)\n sbar.pack(fill=X)\n\n jaw_label = Label(self.centerFrame, text='Set Jaw width(mm)', font=('verdana', 10, 'bold'), background='gray40')\n jaw_label.pack(anchor=CENTER)\n\n # CREATE THE JAW WIDTH SLIDER\n self.slider_var = DoubleVar()\n self.scaler = Scale(self.centerFrame,\n variable=self.slider_var,\n orient=HORIZONTAL,\n from_=0,\n to=90,\n tickinterval=0.2,\n sliderlength=4,\n relief=FLAT,\n length=355,\n fg='steelblue',\n bg='gray40',\n activebackground='brown',\n font=('consolas', 11, 'bold')\n )\n self.scaler.pack(anchor=CENTER)\n\n # ==================System info==============================================\n # Show the robot arm image\n simulator = LabelFrame(self.centerFrame, text='System Information', font=('arial', 11, 'bold'), height=700)\n simulator.pack(side=TOP, fill=Y, anchor=W)\n simulator.configure(background='gray40')\n\n # environment_parameters = LabelFrame(simulator, text='<<<Loaded Parameters:>', font=('consolas', 11))\n # environment_parameters.configure(background='gray40', fg='black')\n # environment_parameters.pack(side=TOP)\n\n tFrame = Frame(simulator, relief=FLAT)\n Label(tFrame, text='Modified: ', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.ctime = Label(tFrame, text='_', fg='black', background='gray40', font=('consolas', 11))\n self.ctime.pack(anchor=W)\n tFrame.pack(anchor=W)\n\n filler = Label(simulator, text='============================================', background='gray40',\n fg='black')\n filler.pack()\n\n username = Frame(simulator, relief=FLAT)\n Label(username, text='Current User :', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.uname = Label(username, text='e5430', background='gray40', fg='black', font=('consolas', 11))\n self.uname.pack(anchor=W)\n username.pack(anchor=W)\n\n operatinSys = Frame(simulator, relief=FLAT)\n Label(operatinSys, text='OS :', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.operatingsys = Label(operatinSys, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.operatingsys.pack(anchor=W)\n operatinSys.pack(anchor=W)\n\n 
controller = Frame(simulator, relief=FLAT)\n Label(controller, text='Microcontroller :', fg='black', background='gray40', font=('consolas', 11)).pack(\n side=LEFT)\n self.control = Label(controller, text='Arduino UNO ATMEGA328P', background='gray40', fg='black',\n font=('consolas', 11))\n self.control.pack(anchor=W)\n controller.pack(anchor=W)\n\n configuration = Frame(simulator, relief=FLAT)\n Label(configuration, text='DoF :', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.config = Label(configuration, text='6', background='gray40', fg='black', font=('consolas', 11))\n self.config.pack(anchor=W)\n configuration.pack(anchor=W)\n\n run = Frame(simulator, relief=FLAT)\n Label(run, text='Run mode:', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.run_mode = Label(run, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.run_mode.pack(anchor=W)\n run.pack(anchor=W)\n\n coords = Frame(simulator, relief=FLAT)\n Label(coords, text='Pick coordinates : ', fg='black', background='gray40', font=('consolas', 11)).pack(\n side=LEFT)\n self.pick_coords = Label(coords, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.pick_coords.pack(anchor=W)\n coords.pack(anchor=W)\n\n pcoords = Frame(simulator, relief=FLAT)\n Label(pcoords, text='Place Coordinates : ', fg='black', background='gray40', font=('consolas', 11)).pack(\n side=LEFT)\n self.place_coords = Label(pcoords, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.place_coords.pack(anchor=W)\n pcoords.pack(anchor=W)\n\n speed = Frame(simulator, relief=FLAT)\n Label(speed, text='Servo speed : ', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.servo_speed = Label(speed, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.servo_speed.pack(anchor=W)\n speed.pack(anchor=W)\n\n objs = Frame(simulator, relief=FLAT)\n Label(objs, text='Objects : ', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.object = Label(objs, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.object.pack(anchor=W)\n objs.pack(anchor=W)\n\n grip = Frame(simulator, relief=FLAT)\n Label(grip, text='Gripper width : ', fg='black', background='gray40', font=('consolas', 11)).pack(side=LEFT)\n self.gripper_width = Label(grip, text='_', background='gray40', fg='black', font=('consolas', 11))\n self.gripper_width.pack(anchor=W)\n grip.pack(anchor=W)\n\n # =====================END========================================================\n\n # Serial port\n self.centerLower = Frame(self.centerFrame, background='gray40')\n self.centerLower.pack(side=TOP)\n\n serialFrame = Frame(self.centerLower, background='gray40')\n serialFrame.pack(fill=X, side=TOP)\n\n serialLabel = Label(serialFrame, text='Serial Port: ', background='gray40', font=('verdana', 11))\n serialLabel.pack(side=LEFT, anchor=W, fill=X)\n self.ports=['COM13', 'COM17']\n self.serialport = ttk.Combobox(serialFrame, font=('courier', 11),\n values=['COM13', 'COM17'])\n\n self.serialport.pack(side=LEFT)\n self.serialport.current(0)\n\n self.connect_status = Frame(self.centerLower, background='gray40')\n self.connect_status.pack(side=BOTTOM)\n self.connectLabel = Label(self.connect_status, text='', background='gray40', font=('verdana', 10, 'bold'))\n self.connectLabel.pack(anchor=CENTER)\n\n # =====================ANALOG GAUGES================================================\n\n # subdividing the right frame\n 
rightupper = LabelFrame(rightFrame, bg='gray40', text='Monitors', font=('verdana', 10, 'bold'))\n rightupper.pack()\n\n self.inputVoltage = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='Volts-In',\n unit='V',\n bg='gray40',\n yellow=100,\n red=100,\n red_low=80)\n self.inputVoltage.set_value(4.5)\n self.inputVoltage.grid(row=0, column=0)\n\n self.motorOne = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='M1',\n unit='mW',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.motorOne.set_value(4)\n self.motorOne.grid(row=0, column=1)\n\n self.motorTwo = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='M2',\n unit='mW',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.motorTwo.set_value(4.5)\n self.motorTwo.grid(row=0, column=2)\n\n self.motorThree = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='M3',\n unit='mW',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.motorThree.set_value(4.5)\n self.motorThree.grid(row=1, column=0)\n\n self.motorFour = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='M4',\n unit='mW',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.motorFour.set_value(3)\n self.motorFour.grid(row=1, column=1)\n #\n self.motorFive = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='M5',\n unit='mw',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.motorFive.set_value(4.5)\n self.motorFive.grid(row=1, column=2)\n #\n self.gripperMotor = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='Gmotor',\n unit='mW',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.gripperMotor.set_value(4.5)\n self.gripperMotor.grid(row=2, column=0)\n\n self.gripperForce = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='GrForce',\n unit='mN',\n bg='gray40',\n red_low=70,\n yellow=100)\n self.gripperForce.set_value(4.5)\n self.gripperForce.grid(row=2, column=1)\n\n self.proximitySensor = tools.Gauge(rightupper,\n width=180,\n height=150,\n min_value=0.0, max_value=5,\n label='Contact',\n unit='mm',\n bg='gray40',\n yellow=100,\n red=100)\n self.proximitySensor.set_value(4.5)\n self.proximitySensor.grid(row=2, column=2)\n # ===============================END================================================\n\n # ============================GRIPPER FORCE GRAPH====================================\n rightLower = LabelFrame(rightFrame, bg='gray40', text='Gripper Force', font=('verdana', 10, 'bold'))\n rightLower.pack(fill=X)\n\n figure = Figure(figsize=(4.5, 2.5), dpi=100)\n figure.tight_layout(h_pad=5)\n\n subplot = figure.add_subplot(111)\n # subplot.set_xlabel('Time(s)')\n subplot.set_ylabel('Force(N)')\n subplot.plot([1,2,3,4,5,6,7,8], [5,6,7,5,6,1,2,3], color='orange')\n\n\n graphcanvas = FigureCanvasTkAgg(figure, rightLower)\n graphcanvas.draw()\n graphcanvas.get_tk_widget().pack(fill=X)\n\n # =================================END===============================================\n\n\n\n def check_current_mode(self):\n \"\"\"call respective functions for the selected robot operation mode\"\"\"\n\n # update status bar\n\n print(self.mode_var.get())\n\n def console_mode(self):\n \"\"\"Display the current mode of operation\"\"\"\n rmode = self.mode_var.get()\n\n mode = 'ERROR'\n if rmode == 1:\n mode = 'Manual...'\n elif rmode == 2:\n mode = 'Intelligent...'\n elif rmode == 3:\n mode = 
'Remote...'\n\n return mode\n\n def load_to_console(self):\n \"\"\"Console display configurations\"\"\"\n\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n current_time = str(current_time)\n self.ctime.config(text=current_time)\n\n user = os.getlogin()\n self.uname.config(text=user)\n\n opsys = sys.platform\n self.operatingsys.config(text=opsys)\n\n all_coordinates = self.get_all_coordinates()\n\n # get the operation coordinates\n self.get_all_coordinates()\n self.pick_coords.config(text=all_coordinates[0])\n self.place_coords.config(text=all_coordinates[1])\n\n self.servo_speed.config(text=str(self.get_servo_speed()))\n\n self.object.config(text=str(self.get_no_of_objects()))\n\n self.run_mode.config(text=str(self.console_mode()))\n\n # create connection with the Arduino board\n self.connect_status = ArduinoConnection.create_arduino_connection(self.get_port())\n\n # check the connection status\n if self.connect_status is True:\n self.conn_status = 'Connection successful'\n # create label to display the status\n self.connectLabel.config(text=self.conn_status, fg='green')\n else:\n self.conn_status = 'Connection failed. Please try again.'\n self.connectLabel.config(text=self.conn_status, fg='red')\n\n\n def get_port(self):\n \"\"\"Fetch the serial port chosen\"\"\"\n return self.serialport.get()\n\n\n def get_all_coordinates(self):\n \"\"\"Get all coordinates => Pick and place\n (pick), (place)\n (x, y, z), (x, y, z)\n \"\"\"\n return [(self.x.get(),self.y.get(),self.z.get()), (self.place_x.get(), self.place_y.get(), self.place_z.get())]\n\n def get_servo_speed(self):\n s = self.servoEntry.getvalue()\n return s\n\n def get_no_of_objects(self):\n o = self.objectsEntry.get()\n return o\n\n def run(self):\n \"\"\"Run the arm\"\"\"\n\n #update status bar\n self.cstat.config(text='RUNNING...')\n\n def stop(self):\n pass\n\n def exit(self):\n self.parent.destroy()\n\nif __name__ == \"__main__\":\n root = Tk()\n Pmw.initialise()\n LoginForm(root)\n root.mainloop()\n" ]
[ [ "matplotlib.figure.Figure", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
6Ulm/unbalanced_gromov_wasserstein
[ "be23571f653dab16fd0722cb1ec2c3412a1e3f30" ]
[ "experiments_pu/compute_prediction.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 10:58:27 2020\n\nExperiments where one marginal is fixed\n\"\"\"\n\nimport os\nimport numpy as np\nfrom joblib import Parallel, delayed\nimport torch\nimport ot\n\nfrom unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn\nfrom unbalancedgw._batch_utils import compute_batch_flb_plan\nimport utils\nfrom partial_gw import compute_cost_matrices\n\nfolder = \"marginals_without_rescaling\"\npath = os.getcwd() + \"/saved_plans\"\nif not os.path.isdir(path):\n os.mkdir(path)\npath = path + \"/\" + folder\nif not os.path.isdir(path):\n os.mkdir(path)\n\n\ndef euclid_dist(x, y):\n \"\"\"\n Computes the euclidean distance between two pointclouds, returning a\n matrix whose coordinates are the distance between two points.\n\n Parameters\n ----------\n\n x: torch.Tensor of size [size_X, dim]\n coordinates of the first group of vectors of R^d.\n\n y: torch.Tensor of size [size_Y, dim]\n coordinates of the second group of vectors of R^d.\n\n Returns\n -------\n torch.Tensor of size [size_X, size_Y]\n Matrix of all pairwise distances.\n \"\"\"\n return (x[:, None, :] - y[None, :, :]).norm(p=2, dim=2)\n\n\ndef prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl, prior, nb_try):\n \"\"\"\n Compute the tensor used as initialization for UGW.\n The init is obtained by solving partial EMD as in Chapel et al. when the\n domains are the same.\n\n Parameters\n ----------\n\n dataset_p: string\n name of the dataset used for positive data\n\n dataset_u: string\n name of the dataset used for unlabeled data\n\n n_pos: int\n number of positives samples\n\n n_unl: int\n number of unlabeled samples\n\n prior: float\n proportion of positive samples in the unlabeled dataset\n\n nb_try: int\n number of folds to perform PU learning\n\n Returns\n -------\n init_plan: torch.Tensor of size [nb_try, n_pos, n_unl]\n Set of initialization plans used to init UGW.\n \"\"\"\n init_plan = torch.zeros([nb_try, n_pos, n_unl])\n for i in range(nb_try):\n # Draw dataset\n P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,\n n_unl, prior, seed_nb=i)\n Ctot, C1, C2, mu, nu = compute_cost_matrices(P, U, prior,\n nb_dummies=10)\n # Compute init\n init_plan[i] = torch.tensor(ot.emd(mu, nu, Ctot)[:n_pos, :])\n return init_plan\n\n\ndef compute_plan_ugw(dataset_p, dataset_u, n_pos, n_unl, prior, eps, rho, rho2,\n nb_try, device=0):\n # Set default type and GPU device\n torch.cuda.set_device(device)\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n # keep constant to normalize cost, uniform over folds by taking first batch\n # P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos, n_unl,\n # prior, 0)\n # U = torch.tensor(U.values,dtype=torch.float) # Convert to torch\n # cst_norm = euclid_dist(U, U).max()\n\n # Draw cost for all seeds as batch\n Cx = torch.zeros([nb_try, n_pos, n_pos])\n Cy = torch.zeros([nb_try, n_unl, n_unl])\n for i in range(nb_try):\n P, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,\n n_unl, prior, seed_nb=i)\n P, U = torch.tensor(P.values, dtype=torch.float), \\\n torch.tensor(U.values, dtype=torch.float)\n cx, cy = euclid_dist(P, P), euclid_dist(U, U)\n Cx[i], Cy[i] = cx, cy\n # Cx[i], Cy[i] = cx / cst_norm, cy / cst_norm\n del cx, cy\n\n # Compute init and weights\n mu = (torch.ones([n_pos]) / n_pos).expand(nb_try, -1)\n nu = (torch.ones([n_unl]) / n_unl).expand(nb_try, -1)\n if P.shape[1] == U.shape[1]: # If domains are the same\n init_plan = 
prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl,\n prior, nb_try)\n else:\n _, _, init_plan = compute_batch_flb_plan(\n mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,\n nits_sinkhorn=50000, tol_sinkhorn=1e-5)\n\n # Compute the marginal of init and save as file\n pi_numpy = init_plan.sum(dim=1).cpu().data.numpy()\n fname = f'/ugw_init_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \\\n f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'\n np.save(path + fname, pi_numpy)\n\n # Set params and start the grid wrt entropic param eps\n pi = log_batch_ugw_sinkhorn(mu, Cx, nu, Cy, init=init_plan,\n eps=eps, rho=rho, rho2=rho2,\n nits_plan=3000, tol_plan=1e-5,\n nits_sinkhorn=3000, tol_sinkhorn=1e-6)\n if torch.any(torch.isnan(pi)):\n raise Exception(f\"Solver got NaN plan with params (eps, rho) = \"\n f\"{dataset_p, dataset_u, nb_try, eps, rho, rho2}\")\n\n # Compute the marginal and save as file\n pi_numpy = pi.sum(dim=1).cpu().data.numpy()\n fname = f'/ugw_plan_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \\\n f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'\n np.save(path + fname, pi_numpy)\n\n print(\n f\"DONE = Dataset {dataset_p, dataset_u}, eps = {eps}, \"\n f\"rho = {rho, rho2}, reps = {nb_try}\")\n return\n\n\nif __name__ == '__main__':\n parallel_gpu = True\n\n # epsilon Set to 2**-9 but an be optimized via grid-search\n grid_eps = [2. ** k for k in range(-9, -8, 1)]\n grid_rho = [2. ** k for k in range(-10, -4, 1)]\n nb_try = 40\n\n # List all tasks for the Caltech datasets\n list_tasks = []\n # # Matching similar features - prior set to 10%\n n_pos, n_unl, prior = 100, 100, 0.1\n list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']\n list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',\n 'decaf_dslr']\n list_data = [('surf_Caltech', d) for d in list_surf] + [\n ('decaf_caltech', d) for d in list_decaf]\n list_tasks = list_tasks + [\n (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)\n for (data_pos, data_unl) in list_data for eps in grid_eps\n for rho in grid_rho for rho2 in grid_rho]\n\n # # Matching similar features - prior set to 20%\n n_pos, n_unl, prior = 100, 100, 0.2\n list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']\n list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']\n list_data = [('surf_Caltech', d) for d in list_surf] + [\n ('decaf_caltech', d) for d in list_decaf]\n list_tasks = list_tasks + [\n (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)\n for (data_pos, data_unl) in list_data for eps in grid_eps\n for rho in grid_rho for rho2 in grid_rho]\n\n # Matching different features - prior set to 10%\n n_pos, n_unl, prior = 100, 100, 0.1\n list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']\n list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',\n 'decaf_dslr']\n list_data = [('surf_Caltech', d) for d in list_decaf] + [\n ('decaf_caltech', d) for d in list_surf]\n list_tasks = list_tasks + [\n (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)\n for (data_pos, data_unl) in list_data for eps in grid_eps\n for rho in grid_rho for rho2 in grid_rho]\n\n # # Matching different features - prior set to 20%\n n_pos, n_unl, prior = 100, 100, 0.2\n list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']\n list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']\n list_data = [('surf_Caltech', d) for d in list_decaf] + [\n ('decaf_caltech', d) for d in list_surf]\n list_tasks = list_tasks + [\n (data_pos, data_unl, n_pos, n_unl, 
prior, eps, rho, rho2, nb_try)\n for (data_pos, data_unl) in list_data for eps in grid_eps\n for rho in grid_rho for rho2 in grid_rho]\n\n if parallel_gpu:\n assert torch.cuda.is_available()\n list_device = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n total_devices = torch.cuda.device_count()\n print(\n f\"Parallel computation // Total GPUs available = {total_devices}\")\n pll = Parallel(n_jobs=total_devices)\n iterator = (\n delayed(compute_plan_ugw)(data_pos, data_unl, n_pos, n_unl, prior,\n eps, rho, rho2, nb_try,\n device=list_device[k % total_devices])\n for\n k, (\n data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,\n nb_try) in\n enumerate(list_tasks))\n pll(iterator)\n\n else:\n print(\"Not Parallel\")\n for (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,\n nb_try) in list_tasks:\n compute_plan_ugw(data_pos, data_unl, n_pos, n_unl, prior, eps, rho,\n rho2, nb_try)\n print(f'{data_pos, data_unl} done.')\n" ]
[ [ "torch.zeros", "torch.isnan", "torch.set_default_tensor_type", "numpy.save", "torch.cuda.device_count", "torch.ones", "torch.cuda.set_device", "torch.cuda.is_available", "torch.tensor" ] ]
Yash-10/numbakit-ode
[ "aa5a0f417a2218bd471db754b35cc61996b2461e" ]
[ "nbkode/nbcompat/common.py" ]
[ "\"\"\"\n nbkode.nbcompat.common\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Common methods.\n\n :copyright: 2020 by nbkode Authors, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport numpy as np\n\nfrom .nb_to_import import numba\n\n\[email protected]()\ndef isclose(a, b, atol, rtol):\n return np.abs(a - b) <= (atol + rtol * np.abs(b))\n\n\[email protected]\ndef clip(x, xmin, xmax):\n return min(max(x, xmin), xmax)\n" ]
[ [ "numpy.abs" ] ]
youssriaboelseod/deep-learning-v2-pytorch
[ "db0cf684d58ed660a1d3b334661bbc88050e4dc1" ]
[ "gan-mnist/MNIST_GAN_Exercise.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Generative Adversarial Network\n# \n# In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\n# \n# GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n# \n# * [Pix2Pix](https://affinelayer.com/pixsrv/) \n# * [CycleGAN & Pix2Pix in PyTorch, Jun-Yan Zhu](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)\n# * [A list of generative models](https://github.com/wiseodd/generative-models)\n# \n# The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes \"fake\" data to pass to the discriminator. The discriminator also sees real training data and predicts if the data it's received is real or fake. \n# > * The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real, training data. \n# * The discriminator is a classifier that is trained to figure out which data is real and which is fake. \n# \n# What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.\n# \n# <img src='assets/gan_pipeline.png' width=70% />\n# \n# The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector that the generator uses to construct its fake images. This is often called a **latent vector** and that vector space is called **latent space**. As the generator trains, it figures out how to map latent vectors to recognizable images that can fool the discriminator.\n# \n# If you're interested in generating only new images, you can throw out the discriminator after training. In this notebook, I'll show you how to define and train these adversarial networks in PyTorch and generate new images!\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 64\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# get the training datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\n\n# prepare data loader\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\n\n\n# ### Visualize the data\n\n# In[ ]:\n\n\n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# get one image from the batch\nimg = np.squeeze(images[0])\n\nfig = plt.figure(figsize = (3,3)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\n\n\n# ---\n# # Define the Model\n# \n# A GAN is comprised of two adversarial networks, a discriminator and a generator.\n\n# ## Discriminator\n# \n# The discriminator network is going to be a pretty typical linear classifier. 
To make this network a universal function approximator, we'll need at least one hidden layer, and these hidden layers should have one key attribute:\n# > All hidden layers will have a [Leaky ReLu](https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU) activation function applied to their outputs.\n# \n# <img src='assets/gan_network.png' width=70% />\n# \n# #### Leaky ReLu\n# \n# We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\n# \n# <img src='assets/leaky_relu.png' width=40% />\n# \n# #### Sigmoid Output\n# \n# We'll also take the approach of using a more numerically stable loss function on the outputs. Recall that we want the discriminator to output a value 0-1 indicating whether an image is _real or fake_. \n# > We will ultimately use [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss), which combines a `sigmoid` activation function **and** and binary cross entropy loss in one function. \n# \n# So, our final output layer should not have any activation function applied to it.\n\n# In[ ]:\n\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Discriminator(nn.Module):\n\n def __init__(self, input_size, hidden_dim, output_size):\n super(Discriminator, self).__init__()\n \n # define hidden linear layers\n self.fc1 = nn.Linear(input_size, hidden_dim*4)\n self.fc2 = nn.Linear(hidden_dim*4, hidden_dim*2)\n self.fc3 = nn.Linear(hidden_dim*2, hidden_dim)\n \n # final fully-connected layer\n self.fc4 = nn.Linear(hidden_dim, output_size)\n \n # dropout layer \n self.dropout = nn.Dropout(0.3)\n \n def forward(self, x):\n # flatten image\n x = x.view(-1, 28*28)\n\n # pass x through all layers\n x = F.leaky_relu(self.fc1(x), 0.2) # (input, negative_slope=0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = self.dropout(x)\n\n # apply leaky relu activation to all hidden layers\n out = self.fc4(x)\n return x\n\n\n# ## Generator\n# \n# The generator network will be almost exactly the same as the discriminator network, except that we're applying a [tanh activation function](https://pytorch.org/docs/stable/nn.html#tanh) to our output layer.\n# \n# #### tanh Output\n# The generator has been found to perform the best with $tanh$ for the generator output, which scales the output to be between -1 and 1, instead of 0 and 1. \n# \n# <img src='assets/tanh_fn.png' width=40% />\n# \n# Recall that we also want these outputs to be comparable to the *real* input pixel values, which are read in as normalized values between 0 and 1. \n# > So, we'll also have to **scale our real input images to have pixel values between -1 and 1** when we train the discriminator. \n# \n# I'll do this in the training loop, later on.\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "matplotlib.pyplot.figure", "torch.utils.data.DataLoader", "numpy.squeeze" ] ]
apodemus/pysalt3
[ "97bb790ad7bcf1137e3ffd2a7b32840ae7167358" ]
[ "proptools/optimizetab.py" ]
[ "# -*- coding: utf-8 -*-\nimport os, sys\nimport numpy as np\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom slitlets import Slitlets\nimport pyslit_optimize as opt\n\nclass OptimizeTab:\n def __init__(self, ui, default_yspacing=1., default_iter=10):\n print('loading OPT')\n self.ui = ui\n self.slitlets=Slitlets()\n self.opt_yspacing = default_yspacing\n self.opt_niter = default_iter\n \n def setoptimizer_yspacing(self):\n self.opt_yspacing = self.checkyspacing_input(self.ui.lineEditOpt_Yspacing.text())\n \n def setoptimizer_iter(self):\n self.opt_niter = self.checkniter_input(self.ui.lineEditOpt_Niter.text())\n\n def includerefstars(self):\n if self.ui.checkBoxOpt_IncRefstars.isChecked():\n nrefstars = len(np.where(self.slitlets.data['priority'] == -1)[0])\n self.ui.lineEditOpt_AllRefstars.setText(str(nrefstars))\n else:\n self.ui.lineEditOpt_AllRefstars.setText('')\n\n def setnumrefstars(self):\n print(self.ui.lineEditOpt_NumRefstars.text())\n\n \n def optimize(self): \n \"\"\"Run the optimizer program and optimize the slits\"\"\"\n msg = \"Optimize the Slitlets\"\n print(msg)\n cra = self.slitmask.center_ra\n cdec = self.slitmask.center_dec\n rotang = self.slitmask.position_angle\n equinox = 2000\n is_in_fov = np.where(self.slitlets.data['fov_flag'] == 1)[0]\n\n # jpk: this will need to be added in the next version\n# is_in_fov = np.where((self.slitlets.data['inmask_flag'] == 1) * (self.slitlets.data['fov_flag'] == 1))[0]\n\n ra = self.slitlets.data['targ_ra']\n dec = self.slitlets.data['targ_dec']\n pri = self.slitlets.data['priority']\n slen = self.slitlets.data['len1'] + self.slitlets.data['len2'] \n swid = self.slitlets.data['width']\n stilt = self.slitlets.data['tilt']\n \n Nstars_req = 0. # **** Paul: I'm not quite sure where to get this from ****\n Niter=10 # **** as above ****\n # set all inmask flags to zero before running optimiser\n #self.slitlets.emptymask()\n \n # -- only run this on objects within FOV: \n ok = is_in_fov\n if not ok.any(): \n print(\"No objects in the field of view--update mask center and run again\")\n return\n\n print(ra[ok])\n tra = ra[ok]\n tdec = dec[ok]\n tpri = pri[ok]\n tslen = slen[ok]\n tswid = swid[ok]\n tstilt = stilt[ok]\n print('running optimizer')\n \n tin_mask = opt.pyslit_optimize(cra, cdec, rotang, equinox, tra, tdec, \\\n tpri,tslen,tswid,tstilt,\\\n Niter,self.opt_yspacing, Nstars_req)\n # apply index numbers to full list:\n in_mask = ok[tin_mask]\n\n # reset all the in_mask values, otherwise the objects which should not\n # be in the optimized mask will still have a in_mask flag \n self.slitlets.data['inmask_flag'] = 0\n self.slitlets.data['collision_flag'] = 0\n\n # now add the in_mask flag to the sources which was found by the\n # optimizer \n for sid in in_mask:\n self.slitlets.addtomask(sid)\n self.updatetabs()\n\n\n def updatetabs(self):\n self.slitmask.outFoV_all()\n self.slitmask.find_collisions()\n self.slitlets.update_flags()\n# pass\n \n def checkyspacing_input(self, x):\n try:\n val = float(x)\n if val > 0:\n return val\n else:\n self.opt_yspacing = 1\n self.ui.lineEditOpt_Yspacing.setText(str(self.opt_yspacing))\n except ValueError as e:\n self.opt_yspacing = 1\n self.ui.lineEditOpt_Yspacing.setText(str(self.opt_yspacing))\n\n def checkniter_input(self,x):\n try:\n val = int(x)\n if val > 0:\n return val\n else:\n self.opt_niter = 10\n self.ui.lineEditOpt_Niter.setText(str(self.opt_niter))\n except ValueError as e:\n self.opt_niter = 10\n self.ui.lineEditOpt_Niter.setText(str(self.opt_niter))\n" ]
[ [ "numpy.where" ] ]
teuben/QAC
[ "7ec92dcdfdeb1b71dd13bd6fd47f29b634b7a488" ]
[ "src/plot.py" ]
[ "#\n# Version: 31-jan-2018\n#\n# In this version we use plot= to denote te plotfile name. in the \"au\" tools the \"figfile=\"\n# keyword is used. In imview() they use the \"out=\". Go figure for standardization, but we\n# should probably assume something standard.\n#\n# Also: plot=None could be used to not show a plot?\n#\nimport matplotlib.pyplot as plt\n\n# example figures made:\n# plot1('test1/tp.ms','aver_12.ms', 'aver_07.ms',11.0,plot='figures/plot1.png')\n# plot1('test1/tp.ms','aver_12.ms', 'aver_07.ms',200.0,plot='figures/plot1a.png')\n\n\ndef plot1(ms0=None, ms7=None, ms12=None, uvmax = 5.0, kwave=True, stride=1, plot='plot1.png'):\n \"\"\" Plotting several MS in a U-V plot\n ms0: TP (but could be any)\n ms7: 7m (single MS)\n ms12: 12m (single MS)\n\n kwave: True means converted to klambda, False means native (meters)\n stride: Take every stride'd point to plot\n\n \"\"\"\n def get_stride(uv, stride=1):\n if stride == 1:\n return uv\n (u,v) = uv\n idx = list(range(0,len(u),stride))\n return (u[idx],v[idx])\n \n (w,h) = plt.figaspect(1.0)\n plt.figure(figsize=(w,h))\n plt.xlim(-uvmax, uvmax)\n plt.ylim(-uvmax, uvmax)\n if ms0 != None:\n (u0,v0) = get_stride(qtp_getuv(ms0,kwave),stride)\n plt.scatter(u0, v0, c='b',s=1)\n if ms7 != None:\n (u7,v7) = get_stride(qtp_getuv(ms7,kwave),stride)\n plt.scatter(u7, v7, c='g',s=20)\n if ms12 != None:\n (u12,v12) = get_stride(qtp_getuv(ms12,kwave),stride)\n plt.scatter(u12,v12,c='r',s=60)\n if kwave:\n plt.xlabel(\"u (k$\\lambda$)\")\n plt.ylabel(\"v (k$\\lambda$)\")\n else:\n plt.xlabel(\"u (meter)\")\n plt.ylabel(\"v (meter)\")\n plt.savefig(plot)\n plt.show()\n\ndef plot1a(mslist, uvmax = 5.0, kwave=True, stride=1, plot='plot1a.png'):\n \"\"\" Plotting several MS as a heat map in a U-V plot\n mslist: List of MS\n\n kwave: True means converted to klambda, False means native (meters)\n stride: Take every stride'd point to plot\n\n @todo CASA's matplotlib doesn't seem to have hist2d()\n\n \"\"\"\n def get_stride(uv, stride=1):\n if stride == 1:\n return uv\n (u,v) = uv\n idx = list(range(0,len(u),stride))\n return (u[idx],v[idx])\n \n from matplotlib.colors import LogNorm\n \n (w,h) = plt.figaspect(1.0)\n plt.figure(figsize=(w,h))\n plt.xlim(-uvmax, uvmax)\n plt.ylim(-uvmax, uvmax)\n u = np.array([])\n v = np.array([])\n for ms in mslist:\n (u0,v0) = get_stride(qtp_getuv(ms,kwave),stride)\n u = np.append(u, u0)\n v = np.append(v, v0)\n # casa's plt doesn't have hist2d yet\n #plt.hist2d(u,v,bins=300, norm=LogNorm())\n #plt.colorbar()\n if kwave:\n plt.xlabel(\"u (k$\\lambda$)\")\n plt.ylabel(\"v (k$\\lambda$)\")\n else:\n plt.xlabel(\"u (meter)\")\n plt.ylabel(\"v (meter)\")\n plt.savefig(plot)\n plt.show()\n # since this fails, write the (u,v)'s to a file and use a more modern python\n if True:\n np.savetxt(\"plot1a.tab\",(u,v))\n # (u,v) = np.loadtxt(\"plot1a.tab\")\n\ndef plot1b(tab, uvmax = 5.0, bins=256, kwave=True, plot='plot1b.png'):\n \"\"\" Plotting several MS as a heat map in a U-V plot\n tab: ascii table from loadtxt/savetxt via plot1a()\n\n kwave: True means converted to klambda, False means native (meters)\n\n @todo CASA's matplotlib doesn't seem to have hist2d()\n\n \"\"\"\n (u,v) = np.loadtxt(tab)\n print(u.min(),v.min(),u.max(),v.max())\n u = np.append(u,-u)\n v = np.append(v,-v)\n\n from matplotlib.colors import LogNorm\n \n (w,h) = plt.figaspect(1.0)\n plt.figure(figsize=(w,h))\n plt.hist2d(u,v,bins=bins, norm=LogNorm())\n # plt.colorbar()\n plt.xlim(-uvmax, uvmax)\n plt.ylim(-uvmax, uvmax)\n if kwave:\n plt.xlabel(\"u 
(k$\\lambda$)\")\n plt.ylabel(\"v (k$\\lambda$)\")\n else:\n plt.xlabel(\"u (meter)\")\n plt.ylabel(\"v (meter)\")\n plt.savefig(plot)\n plt.show()\n \ndef plot2(plot2file, f1=None, f2=None, plot='plot2.png'):\n \"\"\" Plotting flux as function of channel for various situations\n This is normally used to build up composite plots\n \"\"\"\n plt.figure()\n _tmp = imstat(plot2file,axes=[0,1])\n if 'flux' in _tmp:\n flux = _tmp['flux']/1000.0\n totalflux = imstat(plot2file)['flux'][0]/1000.0\n else:\n flux = _tmp['sum']/1000.0\n totalflux = imstat(plot2file)['sum'][0]/1000.0\n rms = _tmp['rms']/1000.0\n chan = np.arange(len(flux))\n plt.plot(chan,flux,c='r',label='TP image')\n if f1 != None:\n plt.plot(chan,f1,c='g')\n if f2 != None:\n plt.plot(chan,f2,c='b')\n zero = 0.0 * flux\n plt.plot(chan,zero,c='black')\n plt.ylabel('Flux/1000')\n plt.xlabel('Channel')\n plt.title('%s Total flux/1000: %f' % (plot2file,totalflux))\n plt.legend()\n plt.savefig(plot)\n plt.show()\n return flux\n\ndef plot2a(f, title='Flux Comparison', plot='plot2a.png', label=[], dv=1.0, v=[]):\n \"\"\" Plotting flux as function of channel for various situations\n f = list of equal sized arrays of fluxes\n Also prints out the flux sums, using the dv that needs to be given.\n \"\"\"\n plt.figure()\n chan = np.arange(len(f[0]))\n if len(label) == 0:\n labels=list(range(len(f)))\n for n in range(len(f)):\n labels[n] = \"%d\" % (n+1)\n else:\n labels = label\n \n for (fi,n) in zip(f,list(range(len(f)))):\n if len(v) > 0:\n plt.plot(v[n],fi,label=labels[n])\n dv = v[n][1]-v[n][0]\n else:\n plt.plot(chan,fi,label=labels[n])\n print(\"Flux[%d]: %g Jy (* assumed %g km/s) %s\" % (n+1,fi.sum()*dv, dv, labels[n]))\n zero = 0.0 * f[0]\n plt.ylabel('Flux')\n if len(v) > 0:\n plt.plot(v[0],zero,c='black')\n plt.xlabel('Velocity (km/s)')\n else:\n plt.plot(chan,zero,c='black')\n plt.xlabel('Channel')\n plt.title(title)\n #plt.legend(loc='lower center')\n plt.legend(loc='best') # fontsize='x-small'\n plt.savefig(plot)\n plt.show()\n return \n\n\ndef plot3(mslist, log=True, kwave=True, plot='plot3.png'):\n \"\"\" Plotting several MS in a UVD - AMP plot\n\n mlist: list of MS\n log: logaritmic scale for AMP's\n kwave: True means converted to klambda, False means native (meters) \n\n This routine will probably run out of memory for large files, it needs to stream and collect\n due to keeping nchan \n \n \"\"\"\n def my_getamp(ms, log=True):\n tb.open(ms)\n data = np.abs(tb.getcol('DATA')[0,:,:]) # -> data[nchan,nvis]\n amp = data.max(axis=0)\n tb.close()\n if log: amp = np.log10(amp)\n print(\"AMP min/max = \",amp.min(),amp.max())\n return amp\n\n colors = ['r', 'g', 'b']\n \n plt.figure()\n if type(mslist) == str:\n mslist = [mslist]\n for (ms,c) in zip(mslist,colors):\n if iscasa(ms):\n print(\"Processing \",ms)\n (u0,v0) = qtp_getuv(ms,kwave)\n uvd = np.sqrt(u0*u0+v0*v0)\n amp = my_getamp(ms,log)\n plt.scatter(uvd,amp,c=c,label=ms)\n else:\n print(\"Skipping \",ms)\n if kwave:\n plt.xlabel(\"uvdistance (k$\\lambda$)\")\n else:\n plt.xlabel(\"uvdistance (meter)\")\n if log:\n plt.ylabel(\"log(amp[channel_max])\")\n else:\n plt.ylabel(\"amp[channel_max]\")\n plt.legend()\n plt.savefig(plot)\n plt.show()\n\n\ndef plot4(mslist, bin=None, kwave=True, plot='plot4.png'):\n \"\"\" Plotting several MS in a UVD - WEIGHT plot\n\n mslist: list of MS\n bin: if given, this is the binsize in kLambda for ring weight density\n kwave: True in kLambda, False in native meters\n\n \"\"\"\n def my_getwt(ms):\n tb.open(ms)\n data = tb.getcol('WEIGHT')[:,:] # -> 
data[npol,nvis]\n tb.close()\n return data\n\n colors = ['r', 'g', 'b']\n \n plt.figure()\n if type(mslist) == str:\n mslist = [mslist]\n for (ms,c) in zip(mslist,colors):\n if iscasa(ms):\n print(\"Processing \",ms)\n (u0,v0) = qtp_getuv(ms,kwave)\n uvd = np.sqrt(u0*u0+v0*v0) # in kLambda (or meters)\n wt = my_getwt(ms)\n print(\"PJT\",wt.shape)\n if bin == None:\n # only do the first pol\n plt.scatter(uvd,wt[0,:],c=c,label=ms)\n # plt.scatter(uvd,wt[1,:],c=c,label=ms)\n else:\n uvbins = np.arange(0.0,uvd.max() + bin, bin)\n #uvbins = np.arange(2.0,6.0,1.0)\n print(uvbins)\n print(\"UVD max\",uvd.max())\n wt = wt[0,:]\n digit = np.digitize(uvd,uvbins)\n if True:\n # weight density\n wt_bin = [wt[digit == i].sum() for i in range(1,len(uvbins))]\n print(wt_bin)\n print(len(uvbins),len(digit),len(wt_bin))\n # @todo check if i'm not off by 1/2 bin\n uvarea = np.diff(uvbins*uvbins)\n wt_bin = wt_bin / uvarea\n else:\n # mean weight per uvbin\n wt_bin = [wt[digit == i].mean() for i in range(1,len(uvbins))]\n print(wt_bin)\n print(len(uvbins),len(digit),len(wt_bin))\n wt_bin = np.log10(wt_bin)\n plt.plot(uvbins[1:],wt_bin,drawstyle='steps-mid')\n else:\n print(\"Skipping \",ms)\n if kwave:\n plt.xlabel(\"uvdistance (k$\\lambda$)\")\n else:\n plt.xlabel(\"uvdistance (meter)\")\n if bin == None:\n plt.ylabel(\"weight[channel_max]\")\n else:\n plt.ylabel(\"weight density\")\n plt.legend()\n plt.savefig(plot)\n plt.show()\n\ndef plot5(image, box=None, plot='plot5.png'):\n \"\"\" Plotting min,max,rms as function of channel\n \n box xmin,ymin,xmax,ymax defaults to whole area\n\n A useful way to check the the mean RMS at the first\n or last 10 channels is:\n\n imstat(image,axes=[0,1])['rms'][:10].mean()\n imstat(image,axes=[0,1])['rms'][-10:].mean()\n \n \"\"\"\n plt.figure()\n _tmp = imstat(image,axes=[0,1],box=box)\n fmin = _tmp['min']\n fmax = _tmp['max']\n frms = _tmp['rms']\n chan = np.arange(len(fmin))\n f = 0.5 * (fmax - fmin) / frms\n plt.plot(chan,fmin,c='r',label='min')\n plt.plot(chan,fmax,c='g',label='max')\n plt.plot(chan,frms,c='b',label='rms')\n plt.plot(chan,f, c='black', label='<peak>/rms')\n zero = 0.0 * frms\n plt.plot(chan,zero,c='black')\n plt.ylabel('Flux')\n plt.xlabel('Channel')\n plt.title('%s Min/Max/RMS' % (image))\n plt.legend()\n plt.savefig(plot)\n plt.show()\n\ndef plot6(imlist, bins=50, range=None, log=False, alpha=[1, 0.3, 0.1], box=None, plot='plot6.png'):\n \"\"\" Plotting histograms on top of each other, nice for comparison\n\n imlist list of images\n box='xmin,ymin,xmax,ymax' is the only syntax allowed here\n \"\"\"\n def mybox(box):\n a = box.split(',')\n if len(a) != 4:\n return (0,0,0,0)\n xmin = int(a[0])\n ymin = int(a[1])\n xmax = int(a[2])\n ymax = int(a[3])\n return (xmin,ymin,xmax,ymax)\n \n plt.figure()\n for (i,a) in zip(imlist,alpha):\n data = ia.open(i)\n if box == None:\n data = ia.getchunk().ravel()\n else:\n (xmin,ymin,xmax,ymax) = mybox(box)\n if xmin==0 and xmax==0:\n print(\"Warning: bad box \",box)\n data = ia.getchunk().ravel()\n else:\n data = ia.getchunk([xmin,ymin],[xmax,ymax]).ravel()\n ia.close()\n plt.hist(data,bins=bins,range=range,log=log,alpha=a)\n plt.savefig(plot)\n plt.show()\n \ndef plot7(basename, idx=0, channel=0, box=None, range=None, plot='plot7.png', residual = True, verbose=False):\n \"\"\"\n composite for a channel in a cube, needs to be flux flat since we histograms\n\n basename e.g. 
int_2.image tpint_2.image tpint_2.tweak.image\n int_2.residual tpint_2.residual tpint_2.tweak.residual\n \"\"\"\n def plotim(image, channel, drange, box=None):\n \"\"\"\n \"\"\"\n if image == None:\n return\n if not QAC.iscasa(image):\n return\n tb.open(image)\n d1 = tb.getcol(\"map\").squeeze()\n tb.close()\n nx = d1.shape[0]\n ny = d1.shape[1]\n if len(d1.shape) == 2:\n d3 = np.flipud(np.rot90(d1.reshape((nx,ny)))) \n else:\n d2 = d1[:,:,channel]\n d3 = np.flipud(np.rot90(d2.reshape((nx,ny))))\n if box != None:\n data = d3[box[1]:box[3],box[0]:box[2]]\n else:\n data = d3\n alplot = plt.imshow(data, origin='lower', vmin = drange[0], vmax = drange[1])\n plt.title(\"%s[%d]\" % (image,channel))\n print((\"%s[%d] %g %g\" % (image,channel,data.min(),data.max())))\n\n def plothi(image, channel, drange, bins=32, box=None):\n \"\"\"\n simple histogram, overlaying seleted channel on full cube histogram\n \"\"\"\n if image == None:\n return\n if not QAC.iscasa(image):\n return\n\n tb.open(image)\n d1 = tb.getcol(\"map\").squeeze()\n tb.close()\n nx = d1.shape[0]\n ny = d1.shape[1]\n d2 = d1[:,:,channel]\n # @todo mask out 0's or masked or NaN\n plt.hist(d1.ravel(),bins=bins,range=drange,log=True) # alpha=a\n plt.hist(d2.ravel(),bins=bins,range=drange,log=True) # alpha=a\n \n im0 = []\n im0.append( basename + '/int%s.image.pbcor' % QAC.label(idx) )\n im0.append( basename + '/tpint%s.image.pbcor' % QAC.label(idx) )\n im0.append( basename + '/tpint%s.tweak.image.pbcor' % QAC.label(idx) )\n im0.append( basename + '/int%s.residual' % QAC.label(idx) )\n im0.append( basename + '/tpint%s.residual' % QAC.label(idx) )\n im0.append( basename + '/tpint%s.tweak.residual' % QAC.label(idx) )\n\n dmin = dmax = None\n print(im0)\n print((len(im0)))\n # for i in range(len(im0)):\n for i in [0,1,2,3,4,5]:\n if QAC.iscasa(im0[i]):\n if verbose: qac_stats(im0[i])\n # if range == None:\n if True:\n h = imstat(im0[i])\n if dmin == None:\n dmin = h['min']\n dmax = h['max']\n else:\n if h['min'] < dmin: dmin = h['min']\n if h['max'] > dmax: dmax = h['max']\n else:\n print(('%s failed' % im0[i]))\n im0[i] = None\n if range == None:\n qprint(\"Global data min/max = %g %g \" % (dmin,dmax))\n drange = [dmin,dmax]\n else:\n drange = range\n\n bins = 100\n\n fig = plt.figure()\n plt.subplot(3,3,1); plotim(im0[0],channel,drange,box)\n plt.subplot(3,3,2); plotim(im0[1],channel,drange,box)\n plt.subplot(3,3,3); plotim(im0[2],channel,drange,box)\n plt.colorbar() \n plt.subplot(3,3,4); plotim(im0[3],channel,drange,box)\n plt.subplot(3,3,5); plotim(im0[4],channel,drange,box)\n plt.subplot(3,3,6); plotim(im0[5],channel,drange,box)\n plt.colorbar()\n if not residual:\n plt.subplot(3,3,7); plothi(im0[0],channel,[dmin,dmax],bins,box)\n plt.subplot(3,3,8); plothi(im0[1],channel,[dmin,dmax],bins,box)\n plt.subplot(3,3,9); plothi(im0[2],channel,[dmin,dmax],bins,box)\n else:\n plt.subplot(3,3,7); plothi(im0[3],channel,drange,bins,box)\n plt.subplot(3,3,8); plothi(im0[4],channel,drange,bins,box)\n plt.subplot(3,3,9); plothi(im0[5],channel,drange,bins,box)\n\n \n plt.savefig(plot)\n plt.show()\n\ndef plot8(im1, im2, box=None, range=None, plot='plot8.png', verbose=False):\n \"\"\"\n overlay two histograms\n im1\n im2\n\n \"\"\"\n #\n tb.open(im1)\n d1 = tb.getcol(\"map\").squeeze()\n tb.close()\n nx1 = d1.shape[0]\n ny1 = d1.shape[1]\n #\n tb.open(im2)\n d2 = tb.getcol(\"map\").squeeze()\n tb.close()\n nx2 = d1.shape[0]\n ny2 = d1.shape[1]\n\n dmin = dmax = None\n for i in [im1,im2]:\n if verbose: qac_stats(i)\n h = imstat(i)\n if dmin == 
None:\n dmin = h['min']\n dmax = h['max']\n else:\n if h['min'] < dmin: dmin = h['min']\n if h['max'] > dmax: dmax = h['max']\n if range == None:\n print((\"Global data min/max = %g %g \" % (dmin,dmax)))\n drange = [dmin,dmax]\n else:\n drange = range\n\n bins = 100\n\n fig = plt.figure()\n plt.hist(d1.ravel(),bins=bins,range=drange,log=True) # alpha=a\n plt.hist(d2.ravel(),bins=bins,range=drange,log=True) # alpha=a\n\n \n plt.savefig(plot)\n plt.show()\n \n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.figaspect", "matplotlib.colors.LogNorm", "matplotlib.pyplot.subplot" ] ]
Inkln/catalyst
[ "37ff273a223df3bc70411cd79a79d1b038ba8f9b", "37ff273a223df3bc70411cd79a79d1b038ba8f9b" ]
[ "catalyst/contrib/nn/criterion/triplet.py", "tests/_tests_scripts/dl_z_mvp_mnist.py" ]
[ "# flake8: noqa\nfrom typing import List, TYPE_CHECKING, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.nn import TripletMarginLoss\n\nfrom catalyst.contrib.nn.criterion.functional import triplet_loss\nfrom catalyst.data.utils import convert_labels2list\n\nif TYPE_CHECKING:\n from catalyst.data.sampler_inbatch import IInbatchTripletSampler\n\nTORCH_BOOL = torch.bool if torch.__version__ > \"1.1.0\" else torch.ByteTensor\n\n\nclass TripletLoss(nn.Module):\n \"\"\"Triplet loss with hard positive/negative mining.\n\n Adapted from: https://github.com/NegatioN/OnlineMiningTripletLoss\n \"\"\"\n\n def __init__(self, margin: float = 0.3):\n \"\"\"\n Args:\n margin (float): margin for triplet\n \"\"\"\n super().__init__()\n self.margin = margin\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n\n def _pairwise_distances(self, embeddings, squared=False):\n \"\"\"Compute the 2D matrix of distances between all the embeddings.\n\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n squared (bool): if true, output is the pairwise squared euclidean\n distance matrix. If false, output is the pairwise euclidean\n distance matrix\n\n Returns:\n torch.Tensor: pairwise matrix of size (batch_size, batch_size)\n \"\"\"\n # Get squared L2 norm for each embedding.\n # We can just take the diagonal of `dot_product`.\n # This also provides more numerical stability\n # (the diagonal of the result will be exactly 0).\n # shape (batch_size,)\n square = torch.mm(embeddings, embeddings.t())\n diag = torch.diag(square)\n\n # Compute the pairwise distance matrix as we have:\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = diag.view(-1, 1) - 2.0 * square + diag.view(1, -1)\n\n # Because of computation errors, some distances\n # might be negative so we put everything >= 0.0\n distances[distances < 0] = 0\n\n if not squared:\n # Because the gradient of sqrt is infinite\n # when distances == 0.0 (ex: on the diagonal)\n # we need to add a small epsilon where distances == 0.0\n mask = distances.eq(0).float()\n distances = distances + mask * 1e-16\n\n distances = (1.0 - mask) * torch.sqrt(distances)\n\n return distances\n\n def _get_anchor_positive_triplet_mask(self, labels):\n \"\"\"\n Return a 2D mask where mask[a, p] is True\n if a and p are distinct and have same label.\n\n Args:\n labels: tf.int32 `Tensor` with shape [batch_size]\n\n Returns:\n torch.Tensor: mask with shape [batch_size, batch_size]\n \"\"\"\n indices_equal = torch.eye(labels.size(0)).type(torch.bool)\n\n # labels and indices should be on\n # the same device, otherwise - exception\n indices_equal = indices_equal.to(\"cuda\" if labels.is_cuda else \"cpu\")\n\n # Check that i and j are distinct\n\n indices_equal = indices_equal.type(TORCH_BOOL)\n indices_not_equal = ~indices_equal\n\n # Check if labels[i] == labels[j]\n # Uses broadcasting where the 1st argument\n # has shape (1, batch_size) and the 2nd (batch_size, 1)\n labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)\n\n return labels_equal & indices_not_equal\n\n def _get_anchor_negative_triplet_mask(self, labels):\n \"\"\"Return 2D mask where mask[a, n] is True if a and n have same label.\n\n Args:\n labels: tf.int32 `Tensor` with shape [batch_size]\n\n Returns:\n torch.Tensor: mask with shape [batch_size, batch_size]\n \"\"\"\n # Check if labels[i] != labels[k]\n # Uses broadcasting where the 1st argument\n # has shape (1, batch_size) and the 2nd (batch_size, 1)\n return ~(labels.unsqueeze(0) == 
labels.unsqueeze(1))\n\n def _batch_hard_triplet_loss(\n self, embeddings, labels, margin, squared=True,\n ):\n \"\"\"\n Build the triplet loss over a batch of embeddings.\n For each anchor, we get the hardest positive and\n hardest negative to form a triplet.\n\n Args:\n labels: labels of the batch, of size (batch_size,)\n embeddings: tensor of shape (batch_size, embed_dim)\n margin: margin for triplet loss\n squared: Boolean. If true, output is the pairwise squared\n euclidean distance matrix. If false, output is the\n pairwise euclidean distance matrix.\n\n Returns:\n torch.Tensor: scalar tensor containing the triplet loss\n \"\"\"\n # Get the pairwise distance matrix\n pairwise_dist = self._pairwise_distances(embeddings, squared=squared)\n\n # For each anchor, get the hardest positive\n # First, we need to get a mask for every valid\n # positive (they should have same label)\n mask_anchor_positive = self._get_anchor_positive_triplet_mask(\n labels\n ).float()\n\n # We put to 0 any element where (a, p) is not valid\n # (valid if a != p and label(a) == label(p))\n anchor_positive_dist = mask_anchor_positive * pairwise_dist\n\n # shape (batch_size, 1)\n hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)\n\n # For each anchor, get the hardest negative\n # First, we need to get a mask for every valid negative\n # (they should have different labels)\n mask_anchor_negative = self._get_anchor_negative_triplet_mask(\n labels\n ).float()\n\n # We add the maximum value in each row\n # to the invalid negatives (label(a) == label(n))\n max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)\n anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (\n 1.0 - mask_anchor_negative\n )\n\n # shape (batch_size,)\n hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)\n\n # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss\n tl = hardest_positive_dist - hardest_negative_dist + margin\n tl[tl < 0] = 0\n loss = tl.mean()\n\n return loss\n\n def forward(self, embeddings, targets):\n \"\"\"Forward propagation method for the triplet loss.\n\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n targets: labels of the batch, of size (batch_size,)\n\n Returns:\n torch.Tensor: scalar tensor containing the triplet loss\n \"\"\"\n return self._batch_hard_triplet_loss(embeddings, targets, self.margin)\n\n\nclass TripletLossV2(nn.Module):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(self, margin=0.3):\n \"\"\"\n Args:\n margin (float): margin for triplet.\n \"\"\"\n super().__init__()\n self.margin = margin\n\n def forward(self, embeddings, targets):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return triplet_loss(embeddings, targets, margin=self.margin)\n\n\nclass TripletPairwiseEmbeddingLoss(nn.Module):\n \"\"\"TripletPairwiseEmbeddingLoss – proof of concept criterion.\n\n Still work in progress.\n\n @TODO: Docs. 
Contribution is welcome.\n \"\"\"\n\n def __init__(self, margin: float = 0.3, reduction: str = \"mean\"):\n \"\"\"\n Args:\n margin (float): margin parameter\n reduction (str): criterion reduction type\n \"\"\"\n super().__init__()\n self.margin = margin\n self.reduction = reduction or \"none\"\n\n def forward(self, embeddings_pred, embeddings_true):\n \"\"\"\n Work in progress.\n\n Args:\n embeddings_pred: predicted embeddings\n with shape [batch_size, embedding_size]\n embeddings_true: true embeddings\n with shape [batch_size, embedding_size]\n\n Returns:\n torch.Tensor: loss\n \"\"\"\n device = embeddings_pred.device\n # s - state space\n # d - embeddings space\n # a - action space\n # [batch_size, embedding_size] x [batch_size, embedding_size]\n # -> [batch_size, batch_size]\n pairwise_similarity = torch.einsum(\n \"se,ae->sa\", embeddings_pred, embeddings_true\n )\n bs = embeddings_pred.shape[0]\n batch_idx = torch.arange(bs, device=device)\n negative_similarity = pairwise_similarity + torch.diag(\n torch.full([bs], -(10 ** 9), device=device)\n )\n # TODO argsort, take k worst\n hard_negative_ids = negative_similarity.argmax(dim=-1)\n\n negative_similarities = pairwise_similarity[\n batch_idx, hard_negative_ids\n ]\n positive_similarities = pairwise_similarity[batch_idx, batch_idx]\n loss = torch.relu(\n self.margin - positive_similarities + negative_similarities\n )\n if self.reduction == \"mean\":\n loss = torch.sum(loss) / bs\n elif self.reduction == \"sum\":\n loss = torch.sum(loss)\n return loss\n\n\nclass TripletMarginLossWithSampler(nn.Module):\n \"\"\"\n This class combines in-batch sampling of triplets and\n default TripletMargingLoss from PyTorch.\n \"\"\"\n\n def __init__(\n self, margin: float, sampler_inbatch: \"IInbatchTripletSampler\"\n ):\n \"\"\"\n Args:\n margin: margin value\n sampler_inbatch: sampler for forming triplets inside the batch\n \"\"\"\n super().__init__()\n self._sampler_inbatch = sampler_inbatch\n self._triplet_margin_loss = TripletMarginLoss(margin=margin)\n\n def forward(\n self, features: Tensor, labels: Union[Tensor, List[int]]\n ) -> Tensor:\n \"\"\"\n Args:\n features: features with the shape of [batch_size, features_dim]\n labels: labels of samples having batch_size elements\n\n Returns: loss value\n\n \"\"\"\n labels_list = convert_labels2list(labels)\n\n (\n features_anchor,\n features_positive,\n features_negative,\n ) = self._sampler_inbatch.sample(features=features, labels=labels_list)\n\n loss = self._triplet_margin_loss(\n anchor=features_anchor,\n positive=features_positive,\n negative=features_negative,\n )\n return loss\n\n\n__all__ = [\n \"TripletLoss\",\n \"TripletLossV2\",\n \"TripletPairwiseEmbeddingLoss\",\n \"TripletMarginLossWithSampler\",\n]\n", "# flake8: noqa\nimport os\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data.cv import ToTensor\nfrom catalyst.utils import metrics\n\nmodel = torch.nn.Linear(28 * 28, 10)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.02)\n\nloaders = {\n \"train\": DataLoader(\n MNIST(\"./data\", train=True, download=True, transform=ToTensor()),\n batch_size=32,\n ),\n}\n\n\nclass CustomRunner(dl.Runner):\n def predict_batch(self, batch):\n # model inference step\n return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))\n\n def _handle_batch(self, batch):\n # model train/valid step\n x, y = batch\n y_hat = self.model(x.view(x.size(0), -1))\n\n 
loss = F.cross_entropy(y_hat, y)\n accuracy01, accuracy03 = metrics.accuracy(y_hat, y, topk=(1, 3))\n self.batch_metrics.update(\n {\"loss\": loss, \"accuracy01\": accuracy01, \"accuracy03\": accuracy03}\n )\n\n if self.is_train_loader:\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n\ndef main():\n runner = CustomRunner()\n # model training\n runner.train(\n model=model,\n optimizer=optimizer,\n loaders=loaders,\n logdir=\"./logs\",\n num_epochs=5,\n verbose=True,\n load_best_on_end=True,\n check=True,\n )\n # model inference\n for prediction in runner.predict_loader(loader=loaders[\"train\"]):\n assert prediction.detach().cpu().numpy().shape[-1] == 10\n # model tracing\n traced_model = runner.trace(loader=loaders[\"train\"])\n\n\nif __name__ == \"__main__\":\n if os.getenv(\"USE_APEX\", \"0\") == \"0\" and os.getenv(\"USE_DDP\", \"0\") == \"0\":\n main()\n" ]
[ [ "torch.nn.MarginRankingLoss", "torch.nn.TripletMarginLoss", "torch.sqrt", "torch.relu", "torch.arange", "torch.einsum", "torch.full", "torch.diag", "torch.sum" ], [ "torch.nn.Linear", "torch.nn.functional.cross_entropy" ] ]
openPMD/openPMD-CCD
[ "ab529c47d310da72405fee3daa1c3e246c2c1265" ]
[ "tests/test_write.py" ]
[ "\"\"\"\nThis test file is part of the openPMD-CCD.\n\nCopyright 2020 openPMD contributors\nAuthors: Axel Huebl\nLicense: BSD-3-Clause-LBNL\n\"\"\"\nfrom PIL import Image\nimport io\nimport numpy as np\nimport os\nimport pytest\n\nfrom openpmd_ccd import CCD\n\n\nFIXTURE_DIR = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'data',\n)\n\[email protected](FIXTURE_DIR)\ndef test_write(datafiles):\n \"\"\" test writes from the README \"\"\"\n\n scan = CCD(\"defaultCam_scan001.h5\", overwrite=True,\n name=\"Go Pro\", model=\"HERO8 Black\", serial=\"12345678\",\n operator=\"Axel Huebl <[email protected]>\",\n # resolution=None, roi=None, exposure_time=None\n )\n\n image_path = str(datafiles.listdir()[0])\n\n # add by bytes\n with io.BytesIO() as image_data:\n im = Image.open(image_path)\n im.save(image_data, im.format)\n image_data.seek(0)\n im_numpy = np.array(im)\n for image_number in range(10):\n scan.add(image_number, image_data=im)\n for image_number in range(10, 20):\n scan.add(image_number, image_data=image_data)\n for image_number in range(30, 40):\n scan.add(image_number, image_data=im_numpy)\n\n # add by list of list of uints (LabView)\n im_list = [[1, 2, 3], [4, 5, 6]]\n for image_number in range(40, 50):\n scan.add(image_number, image_data=im_list)\n\n # add by path\n for image_number in range(50, 60):\n scan.add(image_number, image_path)\n \n # scan.recalibrate(...)\n\n scan.close()\n del scan\n" ]
[ [ "numpy.array" ] ]
Morgan-Stanley/Testplan
[ "9374d6e0da6ae9aa7a1b5e08b42cd21993485837" ]
[ "tests/unit/testplan/testing/multitest/test_result.py" ]
[ "\"\"\"\nUnit tests for the testplan.testing.multitest.result module.\n\"\"\"\n\nimport collections\nimport copy\nimport hashlib\nimport inspect\nimport os\nimport re\nfrom unittest import mock\n\nimport matplotlib\nimport pytest\nimport matplotlib.pyplot as plot\n\nfrom testplan.common.utils import comparison\nfrom testplan.common.utils import testing\nfrom testplan.common.utils import callable\nfrom testplan.common.utils import path as path_utils\n\nfrom testplan.testing.multitest import result as result_mod\nfrom testplan.testing.multitest import MultiTest\nfrom testplan.testing.multitest.suite import testcase, testsuite\n\nmatplotlib.use(\"agg\")\n\n\ndef get_line_no(obj, rel_pos):\n \"\"\"\n Extracts absolute line number based on object and relative position.\n \"\"\"\n _, start = inspect.getsourcelines(obj)\n return start + rel_pos\n\n\ndef helper(result, description=None):\n result.less(1, 2, description=description)\n\n\n@result_mod.report_target\ndef intermediary(result, description=None):\n helper(result, description=description)\n\n\ndef test_group_marking():\n \"\"\"\n Tests, at result object level, if marking works as expected.\n \"\"\"\n result = result_mod.Result()\n result.equal(1, 1)\n assert result.entries.pop().line_no == get_line_no(test_group_marking, 5)\n helper(result)\n assert result.entries.pop().line_no == get_line_no(helper, 1)\n intermediary(result)\n assert result.entries.pop().line_no == get_line_no(intermediary, 2)\n\n\n@testsuite\nclass GroupMarking:\n @testcase\n def case(self, env, result):\n result.equal(1, 1, description=\"A\")\n helper(result, description=\"B\")\n intermediary(result, description=\"C\")\n\n\n@testsuite\nclass ParametrizedGroupMarking:\n @testcase(parameters=(0, 1))\n def case(self, env, result, val):\n result.equal(val, 1, description=f\"A{val}\")\n helper(result, description=f\"B{val}\")\n intermediary(result, description=f\"C{val}\")\n\n\ndef pre_fn(self, env, result):\n result.equal(1, 1, description=\"Pre\")\n\n\ndef post_fn(self, env, result):\n result.equal(1, 1, description=\"Post\")\n\n\n@testsuite\nclass PrePostTestcaseMarking:\n @callable.pre(pre_fn)\n @callable.post(post_fn)\n @testcase\n def case(self, env, result):\n result.equal(1, 1, description=\"Case\")\n\n\[email protected](\"flag\", [True, False])\ndef test_group_marking_multitest(mockplan, flag):\n \"\"\"\n Tests, at MultiTest-level, if marking works as expected.\n \"\"\"\n test = MultiTest(\n name=\"GroupMarking\",\n suites=[GroupMarking()],\n testcase_report_target=flag,\n )\n test.cfg.parent = mockplan.cfg\n test.run()\n assertions = {\n entry[\"description\"]: entry\n for entry in test.report.flatten()\n if isinstance(entry, dict) and entry[\"meta_type\"] == \"assertion\"\n }\n expected = {\n \"A\": get_line_no(GroupMarking.case, 2),\n \"B\": get_line_no(GroupMarking.case, 3)\n if flag\n else get_line_no(helper, 1),\n \"C\": get_line_no(intermediary, 2),\n }\n for desc, line_no in expected.items():\n assert assertions[desc][\"line_no\"] == line_no\n\n\[email protected](\"flag\", [True, False])\ndef test_parametrized_group_marking_multitest(mockplan, flag):\n \"\"\"\n Tests, at MultiTest-level, if marking works as expected\n for parametrized testcases.\n \"\"\"\n test = MultiTest(\n name=\"ParametrizedGroupMarking\",\n suites=[ParametrizedGroupMarking()],\n testcase_report_target=flag,\n )\n test.cfg.parent = mockplan.cfg\n test.run()\n assertions = {\n entry[\"description\"]: entry\n for entry in test.report.flatten()\n if isinstance(entry, dict) and 
entry[\"meta_type\"] == \"assertion\"\n }\n expected = {\n \"A0\": get_line_no(ParametrizedGroupMarking.case, 2),\n \"B0\": get_line_no(ParametrizedGroupMarking.case, 3)\n if flag\n else get_line_no(helper, 1),\n \"C0\": get_line_no(intermediary, 2),\n }\n expected.update(\n {\n \"A1\": expected[\"A0\"],\n \"B1\": expected[\"B0\"],\n \"C1\": expected[\"C0\"],\n }\n )\n for desc, line_no in expected.items():\n assert assertions[desc][\"line_no\"] == line_no\n\n\[email protected](\"flag\", [True, False])\ndef test_parametrized_group_marking_multitest(mockplan, flag):\n \"\"\"\n Tests, at MultiTest-level, if marking works as expected\n for testcase which is decorated by other functions.\n \"\"\"\n test = MultiTest(\n name=\"PrePostTestcaseMarking\",\n suites=[PrePostTestcaseMarking()],\n testcase_report_target=flag,\n )\n test.cfg.parent = mockplan.cfg\n test.run()\n assertions = {\n entry[\"description\"]: entry\n for entry in test.report.flatten()\n if isinstance(entry, dict) and entry[\"meta_type\"] == \"assertion\"\n }\n expected = {\n \"Pre\": get_line_no(pre_fn, 1),\n \"Case\": get_line_no(PrePostTestcaseMarking.case, 4),\n \"Post\": get_line_no(post_fn, 1),\n }\n for desc, line_no in expected.items():\n assert assertions[desc][\"line_no\"] == line_no\n\n\n@testsuite\nclass AssertionOrder:\n @testcase\n def case(self, env, result):\n summary = result.subresult()\n first = result.subresult()\n second = result.subresult()\n\n second.true(True, \"AssertionSecond\")\n\n result.true(True, \"AssertionMain1\")\n result.true(True, \"AssertionMain2\")\n\n first.true(True, \"AssertionFirst1\")\n first.true(True, \"AssertionFirst2\")\n\n summary.append(first)\n result.true(first.passed, \"Report passed so far.\")\n if first.passed:\n summary.append(second)\n\n result.prepend(summary)\n\n\ndef test_assertion_order(mockplan):\n \"\"\"Verify ordered assertion entries in test report.\"\"\"\n mtest = MultiTest(name=\"AssertionsOrder\", suites=[AssertionOrder()])\n mtest.cfg.parent = mockplan.cfg\n mtest.run()\n\n expected = [\n \"AssertionFirst1\",\n \"AssertionFirst2\",\n \"AssertionSecond\",\n \"AssertionMain1\",\n \"AssertionMain2\",\n \"Report passed so far.\",\n ]\n # pylint: disable=invalid-sequence-index\n assertions = [\n entry\n for entry in mtest.report.flatten()\n if isinstance(entry, dict) and entry[\"meta_type\"] == \"assertion\"\n ]\n\n for idx, desc in enumerate(expected):\n assert desc == assertions[idx][\"description\"]\n\n\n@testsuite\nclass AssertionExtraAttribute:\n @testcase\n def case(self, env, result):\n first = result.subresult()\n second = result.subresult()\n\n second.false(False, custom_style=None)\n second.false(False, custom_style={\"border\": 1, \"margin\": 2})\n\n first.true(True, custom_style={\"color\": \"red\", \"bgcolor\": \"white\"})\n first.true(True, custom_style={123: \"foo\", 456: \"bar\", 789: \"baz\"})\n\n result.log(\"Report passed so far.\", custom_style={})\n result.prepend(first)\n result.append(second)\n\n\ndef test_assertion_extra_attribute(mockplan):\n \"\"\"Test that required extra attribute correctly recorded in report.\"\"\"\n mtest = MultiTest(\n name=\"AssertionExtraAttribute\", suites=[AssertionExtraAttribute()]\n )\n mtest.cfg.parent = mockplan.cfg\n mtest.run()\n\n expected = [\n {\"color\": \"red\", \"bgcolor\": \"white\"},\n {\"123\": \"foo\", \"456\": \"bar\", \"789\": \"baz\"},\n {},\n {\"border\": \"1\", \"margin\": \"2\"},\n ]\n assertions = [\n entry\n for entry in mtest.report.flatten()\n if isinstance(entry, dict) and \"custom_style\" 
in entry\n ]\n\n for idx, custom_style in enumerate(expected):\n assert custom_style == assertions[idx][\"custom_style\"]\n\n\[email protected]\ndef dict_ns():\n \"\"\"Dict namespace with a mocked out result object.\"\"\"\n mock_result = mock.MagicMock()\n mock_result.entries = collections.deque()\n return result_mod.DictNamespace(mock_result)\n\n\[email protected]\ndef fix_ns():\n \"\"\"FIX namespace with a mocked out result object.\"\"\"\n mock_result = mock.MagicMock()\n mock_result.entries = collections.deque()\n return result_mod.FixNamespace(mock_result)\n\n\nclass TestDictNamespace:\n \"\"\"Unit testcases for the result.DictNamespace class.\"\"\"\n\n def test_basic_match(self, dict_ns):\n \"\"\"\n Test the match method against identical expected and actual dicts.\n \"\"\"\n expected = {\"key\": 123}\n actual = expected.copy()\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Basic dictmatch of identical dicts passes\",\n )\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Force type-check of values\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"check_types\"],\n )\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Convert values to strings before comparing\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"stringify\"],\n )\n\n def test_duck_match(self, dict_ns):\n \"\"\"\n Test the match method by seting different types that can be compared.\n Due to duck-typing, ints and floats can be equal if they refer to the\n same numeric value - in this case, 123 == 123.0. However if\n type-checking is forced by use of the check_types comparison method\n the assertion will fail.\n \"\"\"\n expected = {\"key\": 123}\n actual = {\"key\": 123.0}\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Dictmatch passes since the numeric values are equal.\",\n )\n\n assert not dict_ns.match(\n actual,\n expected,\n description=\"Dictmatch fails when type comparison is forced.\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"check_types\"],\n )\n\n assert not dict_ns.match(\n actual,\n expected,\n description=\"Dictmatch with string conversion fails due to \"\n \"different string representations of int/float.\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"stringify\"],\n )\n\n def test_fail_match(self, dict_ns):\n \"\"\"\n Test the match method for types that do not compare equal - in this\n case, 123 should not match \"123\".\n \"\"\"\n expected = {\"key\": 123}\n actual = {\"key\": \"123\"}\n\n assert not dict_ns.match(\n actual, expected, description='Dictmatch fails because 123 != \"123'\n )\n\n assert not dict_ns.match(\n actual,\n expected,\n description=\"Dictmatch fails due to type mismatch\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"check_types\"],\n )\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Dictmatch passes when values are converted to strings\",\n value_cmp_func=comparison.COMPARE_FUNCTIONS[\"stringify\"],\n )\n\n def test_custom_match(self, dict_ns):\n \"\"\"Test a dict match using a user-defined comparison function.\"\"\"\n expected = {\"key\": 174.24}\n actual = {\"key\": 174.87}\n\n tolerance = 1.0\n\n def cmp_with_tolerance(lhs, rhs):\n \"\"\"Check that both values are within a given tolerance range.\"\"\"\n return abs(lhs - rhs) < tolerance\n\n assert not dict_ns.match(\n actual, expected, description=\"Values are not exactly equal\"\n )\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Values are equal within tolerance\",\n value_cmp_func=cmp_with_tolerance,\n )\n\n def 
test_report_modes(self, dict_ns):\n \"\"\"Test controlling report modes for a dict match.\"\"\"\n expected = {\"key{}\".format(i): i for i in range(10)}\n actual = expected.copy()\n expected[\"wrong\"] = \"expected\"\n actual[\"wrong\"] = \"actual\"\n\n assert not dict_ns.match(\n actual, expected, description=\"Keep all comparisons by default\"\n )\n assert len(dict_ns.result.entries) == 1\n dict_assert = dict_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 11\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Keep ignored comparisons\",\n include_keys=[\"key{}\".format(i) for i in range(3)],\n )\n\n assert len(dict_ns.result.entries) == 1\n dict_assert = dict_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 11\n\n assert dict_ns.match(\n actual,\n expected,\n description=\"Discard ignored comparisons\",\n include_keys=[\"key{}\".format(i) for i in range(3)],\n report_mode=comparison.ReportOptions.NO_IGNORED,\n )\n\n assert len(dict_ns.result.entries) == 1\n dict_assert = dict_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 3\n\n assert not dict_ns.match(\n actual,\n expected,\n report_mode=comparison.ReportOptions.FAILS_ONLY,\n description=\"Discard passing comparisons\",\n )\n assert len(dict_ns.result.entries) == 1\n dict_assert = dict_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 1\n\n def test_flattened_comparison_result(self, dict_ns):\n \"\"\"Test the comparison result in flattened entries.\"\"\"\n expected = {\n \"foo\": 1,\n \"bar\": lambda val: val >= 1,\n \"baz\": [\n {\n \"apple\": 3,\n \"pear\": 4,\n \"bat\": [\n {\"wine\": \"gin\", \"tea\": re.compile(r\"[a-z]{5}\")},\n {\"wine\": \"vodka\", \"tea\": \"green\"},\n ],\n }\n ],\n }\n actual = copy.deepcopy(expected)\n actual[\"bar\"] = 2\n actual[\"baz\"][0][\"pear\"] = 5\n actual[\"baz\"][0][\"bat\"][0][\"wine\"] = \"lime\"\n actual[\"baz\"][0][\"bat\"][0][\"tea\"] = \"oolong\"\n actual[\"baz\"][0][\"bat\"][1][\"wine\"] = \"brandy\"\n actual[\"baz\"][0][\"bat\"][1][\"tea\"] = \"black\"\n assert dict_ns.match(\n actual,\n expected,\n description=\"complex dictionary comparison\",\n exclude_keys=[\"pear\", \"wine\", \"tea\"],\n )\n assert len(dict_ns.result.entries) == 1\n\n # Comparison result is a list of list items in below format:\n # [indent, key, result, (act_type, act_value), (exp_type, exp_value)]\n comp_result = dict_ns.result.entries[0].comparison\n bar = [item for item in comp_result if item[1] == \"bar\"][0]\n assert bar[0] == 0 and bar[4][0] == \"func\"\n baz = [item for item in comp_result if item[1] == \"baz\"][0]\n assert baz[0] == 0 and baz[2][0].lower() == comparison.Match.PASS\n bat = [item for item in comp_result if item[1] == \"bat\"][0]\n assert bat[0] == 1 and bat[2][0].lower() == comparison.Match.IGNORED\n tea_1, tea_2 = [item for item in comp_result if item[1] == \"tea\"]\n assert (\n tea_1[0] == tea_2[0] == 2\n and tea_1[2][0].lower() == comparison.Match.IGNORED\n and tea_2[2][0].lower() == comparison.Match.IGNORED\n and tea_1[4][0] == \"REGEX\"\n )\n\n\nclass TestFIXNamespace:\n \"\"\"Unit testcases for the result.FixNamespace class.\"\"\"\n\n def test_untyped_fixmatch(self, fix_ns):\n \"\"\"Test FIX matches between untyped FIX messages.\"\"\"\n expected = testing.FixMessage(\n ((35, \"D\"), (38, \"1000000\"), (44, \"125.83\"))\n )\n actual = expected.copy()\n\n assert fix_ns.match(actual, expected, description=\"Basic FIX match\")\n\n def test_typed_fixmatch(self, fix_ns):\n \"\"\"Test FIX matches between 
typed FIX messages.\"\"\"\n expected = testing.FixMessage(\n ((35, \"D\"), (38, 1000000), (44, 125.83)), typed_values=True\n )\n actual = expected.copy()\n\n assert fix_ns.match(actual, expected, description=\"Basic FIX match\")\n\n # Now change the type of the actual 38 key's value to str. The assert\n # should fail since we are performing a typed match.\n actual[38] = \"1000000\"\n assert not fix_ns.match(\n actual, expected, description=\"Failing str/int comparison\"\n )\n\n # Change the type to a float. The match should still fail because of\n # the type difference, despite the numeric values being equal.\n actual[38] = 1000000.0\n assert not fix_ns.match(\n actual, expected, description=\"Failing float/int comparison\"\n )\n\n def test_mixed_fixmatch(self, fix_ns):\n \"\"\"Test FIX matches between typed and untyped FIX messages.\"\"\"\n expected = testing.FixMessage(\n ((35, \"D\"), (38, \"1000000\"), (44, \"125.83\")), typed_values=False\n )\n actual = testing.FixMessage(\n ((35, \"D\"), (38, \"1000000\"), (44, 125.83)), typed_values=True\n )\n\n assert fix_ns.match(actual, expected, description=\"Mixed FIX match\")\n\n def test_report_modes(self, fix_ns):\n \"\"\"Test controlling report modes for FIX match.\"\"\"\n expected = testing.FixMessage((i, (25 * i) - 4) for i in range(10))\n actual = expected.copy()\n expected[\"wrong\"] = \"expected\"\n actual[\"wrong\"] = \"actual\"\n\n assert not fix_ns.match(\n actual, expected, description=\"Keep all comparisons by default\"\n )\n assert len(fix_ns.result.entries) == 1\n dict_assert = fix_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 11\n\n assert fix_ns.match(\n actual,\n expected,\n description=\"Keep ignored comparisons\",\n include_tags=[0, 1, 2],\n )\n\n assert len(fix_ns.result.entries) == 1\n dict_assert = fix_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 11\n\n assert fix_ns.match(\n actual,\n expected,\n description=\"Discard ignored comparisons\",\n include_tags=[0, 1, 2],\n report_mode=comparison.ReportOptions.NO_IGNORED,\n )\n\n assert len(fix_ns.result.entries) == 1\n dict_assert = fix_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 3\n\n assert not fix_ns.match(\n actual,\n expected,\n report_mode=comparison.ReportOptions.FAILS_ONLY,\n description=\"Discard passing comparisons\",\n )\n assert len(fix_ns.result.entries) == 1\n dict_assert = fix_ns.result.entries.popleft()\n assert len(dict_assert.comparison) == 1\n\n def test_flattened_comparison_result(self, fix_ns):\n \"\"\"Test the comparison result in flattened entries.\"\"\"\n expected = {\n 8: \"FIX42\",\n 9: re.compile(r\"[A-Za-z]{2}\"),\n 555: [\n {\n 600: \"A\",\n 601: \"B\",\n 687: [\n {688: \"opq\", 689: \"rst\"},\n {688: \"uvw\", 689: \"xyz\"},\n ],\n }\n ],\n }\n actual = expected.copy()\n actual[9] = \"AE\"\n actual[555] = [{600: \"A\", 601: \"C\", 700: \"D\"}]\n assert not fix_ns.match(\n actual,\n expected,\n description=\"complex fix message comparison\",\n include_tags=[9, 555, 600, 687],\n )\n assert len(fix_ns.result.entries) == 1\n\n # Comparison result is a list of list items in below format:\n # [indent, key, result, (act_type, act_value), (exp_type, exp_value)]\n comp_result = fix_ns.result.entries[0].comparison\n _8 = [item for item in comp_result if item[1] == 8][0]\n assert _8[0] == 0 and _8[2][0].lower() == comparison.Match.IGNORED\n _9 = [item for item in comp_result if item[1] == 9][0]\n assert (\n _9[0] == 0\n and _9[2][0].lower() == comparison.Match.PASS\n and _9[4][0] == 
\"REGEX\"\n )\n _555 = [item for item in comp_result if item[1] == 555][0]\n assert _555[0] == 0 and _555[2][0].lower() == comparison.Match.FAIL\n _600 = [item for item in comp_result if item[1] == 600][0]\n assert _600[0] == 1 and _600[2][0].lower() == comparison.Match.PASS\n _601 = [item for item in comp_result if item[1] == 601][0]\n assert _601[0] == 1 and _601[2][0].lower() == comparison.Match.IGNORED\n _687 = [item for item in comp_result if item[1] == 687][0]\n assert (\n _687[0] == 1\n and _687[2][0].lower() == comparison.Match.FAIL\n and _687[3] == (None, \"ABSENT\") # key not found in actual data\n )\n _688_1, _688_2 = [item for item in comp_result if item[1] == 688]\n assert _688_1[0] == 2 and _688_1[2][0].lower() == comparison.Match.FAIL\n assert _688_2[0] == 2 and _688_2[2][0].lower() == comparison.Match.FAIL\n _689_1, _689_2 = [item for item in comp_result if item[1] == 689]\n assert _689_1[0] == 2 and _689_1[2][0].lower() == comparison.Match.FAIL\n assert _689_2[0] == 2 and _689_2[2][0].lower() == comparison.Match.FAIL\n _700 = [item for item in comp_result if item[1] == 700][0]\n assert (\n _700[0] == 1\n and _700[2][0].lower() == comparison.Match.IGNORED\n and _700[4] == (None, \"ABSENT\") # key not found in expected data\n )\n\n\nclass TestResultBaseNamespace:\n \"\"\"Test assertions and other methods in the base result.* namespace.\"\"\"\n\n def test_graph_assertion(self):\n \"\"\"Unit testcase for the result.graph method.\"\"\"\n result = result_mod.Result()\n graph_assertion = result.graph(\n \"Line\",\n {\n \"Data Name\": [\n {\"x\": 0, \"y\": 8},\n {\"x\": 1, \"y\": 5},\n {\"x\": 2, \"y\": 4},\n {\"x\": 3, \"y\": 9},\n {\"x\": 4, \"y\": 1},\n {\"x\": 5, \"y\": 7},\n {\"x\": 6, \"y\": 6},\n {\"x\": 7, \"y\": 3},\n {\"x\": 8, \"y\": 2},\n {\"x\": 9, \"y\": 0},\n ]\n },\n description=\"Line Graph\",\n series_options={\"Data Name\": {\"colour\": \"red\"}},\n graph_options=None,\n )\n\n assert bool(graph_assertion) is True\n assert len(result.entries) == 1\n assert result.entries[0].graph_type is \"Line\"\n assert type(result.entries[0].graph_data) is dict\n assert type(result.entries[0].series_options) is dict\n assert result.entries[0].graph_options is None\n\n def test_attach(self, tmpdir):\n \"\"\"UT for result.attach method.\"\"\"\n tmpfile = str(tmpdir.join(\"attach_me.txt\"))\n with open(tmpfile, \"w\") as f:\n f.write(\"testplan\\n\" * 1000)\n\n result = result_mod.Result(_scratch=str(tmpdir))\n hash = path_utils.hash_file(tmpfile)\n\n assert result.attach(tmpfile, description=\"Attach a text file\")\n assert len(result.entries) == 1\n attachment_entry = result.entries[0]\n\n assert attachment_entry.source_path == os.path.join(\n os.path.dirname(tmpfile), attachment_entry.dst_path\n )\n assert hash in attachment_entry.dst_path\n assert attachment_entry.orig_filename == \"attach_me.txt\"\n assert attachment_entry.filesize == os.path.getsize(tmpfile)\n\n # The expected destination path depends on the exact hash and filesize\n # of the file we wrote.\n expected_dst_path = \"attach_me-{hash}-{filesize}.txt\".format(\n hash=hash, filesize=attachment_entry.filesize\n )\n assert attachment_entry.dst_path == expected_dst_path\n\n def test_attach_in_result_group(self, tmpdir):\n \"\"\"UT for result.attach method.\"\"\"\n tmpfile = str(tmpdir.join(\"attach_me.txt\"))\n with open(tmpfile, \"w\") as f:\n f.write(\"testplan\\n\" * 1000)\n\n size = os.path.getsize(tmpfile)\n description = \"Attach a text file at level: {}\"\n\n result = 
result_mod.Result(_scratch=str(tmpdir))\n\n assert result.attach(tmpfile, description=description.format(0))\n assert len(result.entries) == 1\n\n with result.group(\"subgroup\") as subgroup:\n assert subgroup.attach(tmpfile, description=description.format(1))\n assert len(subgroup.entries) == 1\n\n with subgroup.group(\"subgroup\") as subsubgroup:\n assert subsubgroup.attach(\n tmpfile, description=description.format(2)\n )\n assert len(subsubgroup.entries) == 1\n\n assert len(subgroup.entries) == 2\n assert len(subgroup.attachments) == 2\n assert len(result.entries) == 2\n assert len(result.attachments) == 3\n\n for idx, attachment in enumerate(result.attachments):\n assert attachment.source_path == os.path.join(\n os.path.dirname(tmpfile), attachment.dst_path\n )\n assert attachment.orig_filename == \"attach_me.txt\"\n assert attachment.filesize == size\n assert attachment.description == description.format(idx)\n\n def test_matplot(self, tmpdir):\n result_dir = str(tmpdir)\n result = result_mod.Result(_scratch=result_dir)\n\n x = range(0, 10)\n y = range(0, 10)\n plot.plot(x, y)\n\n result.matplot(plot, width=4, height=4, description=\"Matplot\")\n\n assert len(result.entries) == 1\n assert len(result.attachments) == 1\n\n with result.group(description=\"subgroup\") as subgroup:\n x = range(0, 10)\n y = range(0, 10)\n plot.plot(x, y)\n\n subgroup.matplot(plot, width=3, height=3, description=\"Matplot\")\n\n assert len(result.entries) == 2\n assert len(result.attachments) == 2\n\n # two different file, with same content on the same directory\n assert (\n result.attachments[0].source_path\n != result.attachments[1].source_path\n )\n assert result.attachments[0].filesize > result.attachments[1].filesize\n assert result.attachments[0].source_path.startswith(result_dir)\n assert result.attachments[1].source_path.startswith(result_dir)\n\n def test_attach_dir(self, tmpdir):\n \"\"\"UT for result.attach method.\"\"\"\n path_utils.makeemptydirs(str(tmpdir.join(\"subdir\")))\n\n tmpfile1 = str(tmpdir.join(\"1.txt\"))\n with open(tmpfile1, \"w\") as f:\n f.write(\"testplan\\n\" * 10)\n\n tmpfile2 = str(tmpdir.join(\"2.txt\"))\n with open(tmpfile2, \"w\") as f:\n f.write(\"testplan\\n\")\n\n tmpfile3 = str(tmpdir.join(\"subdir\").join(\"3.txt\"))\n with open(tmpfile3, \"w\") as f:\n f.write(\"testplan\\n\" * 100)\n\n tmpfile4 = str(tmpdir.join(\"subdir\").join(\"4.txt\"))\n with open(tmpfile4, \"w\") as f:\n f.write(\"testplan\\n\" * 1000)\n\n result = result_mod.Result()\n\n assert result.attach(str(tmpdir), description=\"Attach a directory\")\n assert len(result.entries) == 1\n directory_entry = result.entries[0]\n\n assert directory_entry.source_path == str(tmpdir)\n assert (\n directory_entry.dst_path\n == hashlib.md5(\n directory_entry.source_path.encode(\"utf-8\")\n ).hexdigest()\n )\n assert sorted(directory_entry.file_list) == [\"1.txt\", \"2.txt\"]\n\n assert result.attach(\n str(tmpdir),\n description=\"Attach a directory with filters\",\n ignore=[\"2.*\"],\n only=[\"*.txt\"],\n recursive=True,\n )\n assert len(result.entries) == 2\n directory_entry = result.entries[1]\n\n assert directory_entry.source_path == str(tmpdir)\n assert (\n directory_entry.dst_path\n == hashlib.md5(\n directory_entry.source_path.encode(\"utf-8\")\n ).hexdigest()\n )\n assert sorted(\n [file.replace(\"\\\\\", \"/\") for file in directory_entry.file_list]\n ) == [\n \"1.txt\",\n \"subdir/3.txt\",\n \"subdir/4.txt\",\n ]\n\n def test_bool(self):\n result = result_mod.Result()\n assert result\n assert len(result) 
== 0\n assert result.passed\n\n first = result.subresult()\n second = result.subresult()\n\n first.true(True, \"AssertionFirst\")\n second.true(True, \"AssertionSecond\")\n\n result.append(first)\n result.append(second)\n\n assert len(result) == 2\n assert result.passed\n\n third = result.subresult()\n third.true(False, \"AssertionThird\")\n result.append(third)\n\n assert len(result) == 3\n assert not result.passed\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.plot" ] ]
ImmortalTurtle/robot-surgery-segmentation
[ "dd86cec33d800c1104e9f89296ef8b1d38e968e2" ]
[ "train.py" ]
[ "import argparse\nimport json\nfrom pathlib import Path\nfrom validation import validation_binary, validation_multi\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\nimport torch.backends.cudnn\n\nfrom models import UNet11, LinkNet34, UNet, UNet16, AlbuNet\nfrom loss import LossBinary, LossMulti\nfrom dataset import CustomDataset\nimport utils\n\nfrom prepare_data import get_split, SIZE\n\nfrom torchvision.transforms import ToTensor\n\nfrom dataset import RandomCrop, Rescale\n\nfrom albumentations import (\n HorizontalFlip,\n VerticalFlip,\n Normalize,\n Compose,\n RandomBrightness,\n OneOf,\n IAAAdditiveGaussianNoise,\n GaussNoise,\n OpticalDistortion,\n HueSaturationValue,\n GridDistortion,\n IAAPiecewiseAffine,\n IAASharpen,\n IAAEmboss,\n RandomContrast,\n RandomBrightness\n)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('--jaccard-weight', default=0.5, type=float)\n arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')\n arg('--fold', type=int, help='fold', default=0)\n arg('--root', default='runs/debug', help='checkpoint root')\n arg('--batch-size', type=int, default=1)\n arg('--n-epochs', type=int, default=100)\n arg('--lr', type=float, default=0.0001)\n arg('--workers', type=int, default=12)\n arg('--type', type=str, default='binary', choices=['binary', 'parts', 'instruments'])\n arg('--model', type=str, default='UNet', choices=['UNet', 'UNet11', 'LinkNet34', 'AlbuNet'])\n\n args = parser.parse_args()\n\n root = Path(args.root)\n root.mkdir(exist_ok=True, parents=True)\n\n if args.type == 'parts':\n num_classes = 4\n elif args.type == 'instruments':\n num_classes = 8\n else:\n num_classes = 1\n\n if args.model == 'UNet':\n model = UNet(num_classes=num_classes)\n elif args.model == 'UNet11':\n model = UNet11(num_classes=num_classes, pretrained=True)\n elif args.model == 'UNet16':\n model = UNet16(num_classes=num_classes, pretrained=True)\n elif args.model == 'LinkNet34':\n model = LinkNet34(num_classes=num_classes, pretrained=True)\n elif args.model == 'AlbuNet':\n model = AlbuNet(num_classes=num_classes, pretrained=True)\n else:\n model = UNet(num_classes=num_classes, input_channels=3)\n\n if torch.cuda.is_available():\n if args.device_ids:\n device_ids = list(map(int, args.device_ids.split(',')))\n else:\n device_ids = None\n model = nn.DataParallel(model, device_ids=device_ids).cuda()\n\n if args.type == 'binary':\n loss = LossBinary(jaccard_weight=args.jaccard_weight)\n else:\n loss = LossMulti(num_classes=num_classes, jaccard_weight=args.jaccard_weight)\n\n cudnn.benchmark = True\n\n def make_loader(file_names, shuffle=False, transform=None, problem_type='binary', batch_size=1):\n return DataLoader(\n dataset=CustomDataset(file_names, transform=transform), \n shuffle=shuffle,\n num_workers=args.workers,\n batch_size=batch_size,\n pin_memory=torch.cuda.is_available()\n )\n\n train_file_names, val_file_names = get_split()\n\n print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))\n\n def train_transform(p=1):\n return Compose([\n# Rescale(SIZE),\n RandomCrop(SIZE),\n RandomBrightness(0.2),\n OneOf([\n IAAAdditiveGaussianNoise(),\n GaussNoise(),\n ], p=0.15),\n# OneOf([\n# OpticalDistortion(p=0.3),\n# GridDistortion(p=.1),\n# IAAPiecewiseAffine(p=0.3),\n# ], p=0.1),\n# OneOf([\n# IAASharpen(),\n# IAAEmboss(),\n# RandomContrast(),\n# RandomBrightness(),\n# ], p=0.15),\n 
HueSaturationValue(p=0.15),\n HorizontalFlip(p=0.5),\n Normalize(p=1),\n ], p=p)\n\n def val_transform(p=1):\n return Compose([\n# Rescale(256),\n RandomCrop(SIZE),\n Normalize(p=1)\n ], p=p)\n\n train_loader = make_loader(train_file_names, shuffle=True, transform=train_transform(p=1), problem_type=args.type,\n batch_size=args.batch_size)\n valid_loader = make_loader(val_file_names, transform=val_transform(p=1), problem_type=args.type,\n batch_size=len(device_ids))\n\n root.joinpath('params.json').write_text(\n json.dumps(vars(args), indent=True, sort_keys=True))\n\n if args.type == 'binary':\n valid = validation_binary\n else:\n valid = validation_multi\n\n utils.train(\n init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),\n args=args,\n model=model,\n criterion=loss,\n train_loader=train_loader,\n valid_loader=valid_loader,\n validation=valid,\n fold=args.fold,\n num_classes=num_classes\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.is_available", "torch.nn.DataParallel" ] ]
clintonjwang/cbct-recon
[ "75d5458188d3e1f663b22f64d6aa1306e5da6d11" ]
[ "cnn_builder.py" ]
[ "\"\"\"\nAuthor: Clinton Wang, E-mail: `[email protected]`, Github: `https://github.com/clintonjwang/lipiodol`\nAuthor: David G Ellis (https://github.com/ellisdg/3DUnetCNN)\n\"\"\"\n\nimport keras.backend as K\nfrom keras_contrib.layers.normalization import InstanceNormalization\nfrom keras.layers import Input, Dense, Concatenate, Flatten, Dropout, Lambda\nfrom keras.layers import SimpleRNN, Conv2D, MaxPooling2D, ZeroPadding3D, Activation, ELU, TimeDistributed, Permute, Reshape\nfrom keras.layers.normalization import BatchNormalization\nimport keras.layers as layers\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.utils import np_utils\n\nimport argparse\nimport copy\nimport config\nimport importlib\nimport niftiutils.cnn_components as cnnc\nimport niftiutils.helper_fxns as hf\nimport niftiutils.transforms as tr\nimport math\nfrom math import log, ceil\nimport glob\nimport numpy as np\nimport operator\nimport os\nfrom os.path import *\nimport pandas as pd\nimport random\nfrom scipy.misc import imsave\nfrom skimage.transform import rescale\nfrom niftiutils.metrics import dice_coefficient_loss, get_label_dice_coefficient_function, dice_coefficient\nimport time\nimport keras.regularizers as regularizers\n\ndef norm(x):\n\tx -= K.min(x)\n\tx = x/K.max(x)\n\treturn x\n\ndef build_cnn(optimizer='adam', lr=0.00002):\n\t\"\"\"Main class for setting up a CNN. Returns the compiled model.\"\"\"\n\timportlib.reload(config)\n\n\tC = config.Config()\n\n\tproj = layers.Input(C.proj_dims)\n\t#x = layers.Permute((2,1,3))(img)\n\tx = layers.Reshape((C.proj_dims[0],-1))(proj)\n\tx = layers.Dense(1024, activation='tanh')(x) #, kernel_regularizer=regularizers.l1(0.01)\n\tx = layers.BatchNormalization()(x)\n\t#x = layers.Reshape((C.proj_dims[0],32,-1))(x)\n\t#x = layers.Conv2D(128, 3, activation='relu', padding='same')(x)\n\t#x = layers.Reshape((C.proj_dims[0],-1))(x)\n\tx = layers.Dense(1024, activation='tanh')(x)\n\tx = layers.BatchNormalization()(x)\n\tx = layers.Reshape((C.proj_dims[0],32,32,-1))(x)\n\tx = layers.Conv3D(64, 3, activation='relu', padding='same')(x)\n\t#x = layers.UpSampling3D((1,2,2))(x)\n\tx = layers.MaxPooling3D((2,1,1))(x)\n\tx = layers.Conv3D(64, 3, activation='relu', padding='same')(x)\n\tx = layers.BatchNormalization()(x)\n\tx = layers.Conv3DTranspose(1, 3, activation='sigmoid', padding='same')(x)\n\timg = layers.Reshape(C.world_dims)(x)\n\t#x = layers.Lambda(norm)(x)\n\t#x = layers.Permute((2,1,3))(x)\n\t#x = layers.Conv2D(64, (2,2), activation='relu', padding='same')(x)\n\t#x = layers.Conv2D(64, (2,2), padding='same')(x)\n\n\tmodel = Model(proj, img)\n\tmodel.compile(optimizer=RMSprop(lr=lr, decay=0.1), loss='mse')\n\n\tif False:\n\t\tx = layers.Reshape((C.proj_dims[0],-1))(proj)\n\t\tx = layers.Dense(1024, activation='tanh')(x)\n\t\tx = layers.BatchNormalization()(x)\n\t\tx = layers.Dense(1024, activation='tanh')(x)\n\t\tx = layers.BatchNormalization()(x)\n\t\tx = layers.Reshape((C.proj_dims[0],32,32,-1))(x)\n\t\tx = layers.Conv3D(64, (3,3,3), activation='relu', padding='same')(x)\n\t\tx = layers.UpSampling3D((1,2,2))(x)\n\t\tx = layers.Conv3D(64, (3,3,3), activation='relu', padding='same')(x)\n\t\tx = layers.BatchNormalization()(x)\n\t\tx = layers.Conv3DTranspose(1, (1,3,3), activation='sigmoid', padding='same')(x)\n\n\treturn model\n\ndef train_for_n(nb_epoch=5000, plt_frq=25,BATCH_SIZE=32):\n\n\tfor e in tqdm(range(nb_epoch)):\n\t\t# Make generative images\n\t\timage_batch = 
X_train[np.random.randint(0,X_train.shape[0],size=BATCH_SIZE),:,:,:] \n\t\tnoise_gen = np.random.uniform(0,1,size=[BATCH_SIZE,100])\n\t\tgenerated_images = generator.predict(noise_gen)\n\t\t\n\t\t# Train discriminator on generated images\n\t\tX = np.concatenate((image_batch, generated_images))\n\t\ty = np.zeros([2*BATCH_SIZE,2])\n\t\ty[0:BATCH_SIZE,1] = 1\n\t\ty[BATCH_SIZE:,0] = 1\n\t\t\n\t\t#make_trainable(discriminator,True)\n\t\td_loss = discriminator.train_on_batch(X,y)\n\t\tlosses[\"d\"].append(d_loss)\n\t\n\t\t# train Generator-Discriminator stack on input noise to non-generated output class\n\t\tnoise_tr = np.random.uniform(0,1,size=[BATCH_SIZE,100])\n\t\ty2 = np.zeros([BATCH_SIZE,2])\n\t\ty2[:,1] = 1\n\t\t\n\t\t#make_trainable(discriminator,False)\n\t\tg_loss = GAN.train_on_batch(noise_tr, y2 )\n\t\tlosses[\"g\"].append(g_loss)\n\t\t\n\t\t# Updates plots\n\t\tif e%plt_frq==plt_frq-1:\n\t\t\tplot_loss(losses)\n\t\t\tplot_gen()\n\n\n####################################\n### Training Submodules\n####################################\n\ndef train_generator(n=8):\n\tC = config.Config()\n\n\tfns = glob.glob(r\"D:\\CBCT\\Train\\NPYs\\*_img.npy\")\n\twhile True:\n\t\tlesion_ids = random.sample(fns, n)\n\t\tX_train = np.empty((n,*C.proj_dims))\n\t\tY_train = np.empty((n,*C.world_dims))\n\n\t\tfor ix, lesion_id in enumerate(lesion_ids):\n\t\t\tX_train[ix] = np.load(lesion_id.replace(\"_img\", \"_proj\"))\n\t\t\tY_train[ix] = tr.rescale_img(np.load(lesion_id), C.world_dims)\n\n\t\tyield X_train, Y_train\n" ]
[ [ "numpy.concatenate", "numpy.empty", "numpy.zeros", "numpy.load", "numpy.random.uniform", "numpy.random.randint" ] ]
glotzerlab/rowan
[ "c17147859432d3dc2b023e086632afa8f447f190" ]
[ "rowan/functions.py" ]
[ "# Copyright (c) 2019 The Regents of the University of Michigan\n# All rights reserved.\n# This software is licensed under the BSD 3-Clause License.\nr\"\"\"Submodule containing all standard functions.\"\"\"\n\nimport numpy as np\n\n\ndef exp(q):\n r\"\"\"Compute the natural exponential function :math:`e^q`.\n\n The exponential of a quaternion in terms of its scalar and vector parts\n :math:`q = a + \\boldsymbol{v}` is defined by exponential power series:\n formula :math:`e^x = \\sum_{k=0}^{\\infty} \\frac{x^k}{k!}` as follows:\n\n .. math::\n \\begin{align}\n e^q &= e^{a+v} \\\\\n &= e^a \\left(\\sum_{k=0}^{\\infty} \\frac{v^k}{k!} \\right) \\\\\n &= e^a \\left(\\cos \\lvert \\lvert \\boldsymbol{v} \\rvert \\rvert +\n \\frac{\\boldsymbol{v}}{\\lvert \\lvert \\boldsymbol{v} \\rvert\n \\rvert} \\sin \\lvert \\lvert \\boldsymbol{v} \\rvert \\rvert\n \\right)\n \\end{align}\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.\n\n Example::\n\n >>> rowan.exp([1, 0, 0, 0])\n array([2.71828183, 0. , 0. , 0. ])\n \"\"\"\n q = np.asarray(q)\n\n expo = np.empty(q.shape)\n norms = np.linalg.norm(q[..., 1:], axis=-1)\n e = np.exp(q[..., 0])\n expo[..., 0] = e * np.cos(norms)\n norm_zero = np.isclose(norms, 0)\n not_zero = np.logical_not(norm_zero)\n if np.any(not_zero):\n expo[not_zero, 1:] = (\n e[not_zero, np.newaxis]\n * (q[not_zero, 1:] / norms[not_zero, np.newaxis])\n * np.sin(norms)[not_zero, np.newaxis]\n )\n if np.any(norm_zero):\n expo[norm_zero, 1:] = 0\n else:\n expo[..., 1:] = 0\n\n return expo\n\n\ndef expb(q, b):\n r\"\"\"Compute the exponential function :math:`b^q`.\n\n We define the exponential of a quaternion to an arbitrary base relative\n to the exponential function :math:`e^q` using the change of base\n formula as follows:\n\n .. math::\n \\begin{align}\n b^q &= y \\\\\n q &= \\log_b y = \\frac{\\ln y}{\\ln b}\\\\\n y &= e^{q\\ln b}\n \\end{align}\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.\n\n Example::\n\n >>> rowan.expb([1, 0, 0, 0], 2)\n array([2., 0., 0., 0.])\n \"\"\"\n q = np.asarray(q)\n return exp(q * np.log(b))\n\n\ndef exp10(q):\n r\"\"\"Compute the exponential function :math:`10^q`.\n\n Wrapper around :func:`expb`.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.\n\n Example::\n\n >>> rowan.exp10([1, 0, 0, 0])\n array([10., 0., 0., 0.])\n \"\"\"\n return expb(q, 10)\n\n\ndef log(q):\n r\"\"\"Compute the quaternion natural logarithm.\n\n The natural of a quaternion in terms of its scalar and vector parts\n :math:`q = a + \\boldsymbol{v}` is defined by inverting the exponential\n formula (see :func:`exp`), and is defined by the formula\n :math:`\\frac{x^k}{k!}` as follows:\n\n .. 
math::\n \\begin{equation}\n \\ln(q) = \\ln\\lvert\\lvert q \\rvert\\rvert +\n \\frac{\\boldsymbol{v}}{\\lvert\\lvert \\boldsymbol{v}\n \\rvert\\rvert} \\arccos\\left(\\frac{a}{q}\\right)\n \\end{equation}\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.\n\n Example::\n\n >>> rowan.log([1, 0, 0, 0])\n array([0., 0., 0., 0.])\n \"\"\"\n q = np.asarray(q)\n log = np.empty(q.shape)\n\n # We need all the norms to avoid divide by zeros later.\n # Can also use these to minimize the amount of work done.\n q_norms = norm(q)\n q_norm_zero = np.isclose(q_norms, 0)\n q_not_zero = np.logical_not(q_norm_zero)\n v_norms = np.linalg.norm(q[..., 1:], axis=-1)\n v_norm_zero = np.isclose(v_norms, 0)\n v_not_zero = np.logical_not(v_norm_zero)\n\n if np.any(q_not_zero):\n if np.any(q_norm_zero):\n log[q_norm_zero, 0] = -np.inf\n log[q_not_zero, 0] = np.log(q_norms[q_not_zero])\n else:\n log[..., 0] = -np.inf\n\n if np.any(v_not_zero):\n prefactor = np.empty(q[v_not_zero, 1:].shape)\n prefactor = q[v_not_zero, 1:] / v_norms[v_not_zero, np.newaxis]\n\n inv_cos = np.empty(v_norms[v_not_zero].shape)\n inv_cos = np.arccos(q[v_not_zero, 0] / q_norms[v_not_zero])\n\n if np.any(v_norm_zero):\n log[v_norm_zero, 1:] = 0\n log[v_not_zero, 1:] = prefactor * inv_cos[..., np.newaxis]\n else:\n log[..., 1:] = 0\n\n return log\n\n\ndef logb(q, b):\n r\"\"\"Compute the quaternion logarithm to some base b.\n\n The quaternion logarithm for arbitrary bases is defined using the\n standard change of basis formula relative to the natural logarithm.\n\n .. math::\n \\begin{align}\n \\log_b q &= y \\\\\n q &= b^y \\\\\n \\ln q &= y \\ln b \\\\\n y &= \\log_b q = \\frac{\\ln q}{\\ln b}\n \\end{align}\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n n ((...) :class:`numpy.ndarray`): Scalars to use as log bases.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.\n\n Example::\n\n >>> rowan.logb([1, 0, 0, 0], 2)\n array([0., 0., 0., 0.])\n \"\"\"\n q = np.asarray(q)\n return log(q) / np.log(b)\n\n\ndef log10(q):\n r\"\"\"Compute the quaternion logarithm base 10.\n\n Wrapper around :func:`logb`.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.\n\n Example::\n\n >>> rowan.log10([1, 0, 0, 0])\n array([0., 0., 0., 0.])\n \"\"\"\n q = np.asarray(q)\n return logb(q, 10)\n\n\ndef power(q, n):\n r\"\"\"Compute the power of a quaternion :math:`q^n`.\n\n Quaternions raised to a scalar power are defined according to the polar\n decomposition angle :math:`\\theta` and vector :math:`\\hat{u}`:\n :math:`q^n = \\lvert\\lvert q \\rvert\\rvert^n \\left( \\cos(n\\theta) + \\hat{u}\n \\sin(n\\theta)\\right)`. However, this can be computed\n more efficiently by noting that :math:`q^n = \\exp(n \\ln(q))`.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n n ((...) 
:class:`numpy.ndarray`): Scalars to exponentiate quaternions with.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Powers of ``q``.\n\n Example::\n\n >>> rowan.power([1, 0, 0, 0], 5)\n array([1., 0., 0., 0.])\n \"\"\"\n # TODO: Write polar decomposition function #noqa\n q = np.asarray(q)\n\n newshape = np.broadcast(q[..., 0], n).shape\n q = np.broadcast_to(q, newshape + (4,))\n n = np.broadcast_to(n, newshape)\n\n # Note that we follow the convention that 0^0 = 1\n check = n == 0\n if np.any(check):\n powers = np.empty(newshape + (4,))\n powers[check] = np.array([1, 0, 0, 0])\n not_check = np.logical_not(check)\n if np.any(not_check):\n powers[not_check] = exp(n[not_check, np.newaxis] * log(q[not_check, :]))\n else:\n powers = exp(n[..., np.newaxis] * log(q))\n\n return powers\n\n\ndef conjugate(q):\n r\"\"\"Conjugates an array of quaternions.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Conjugates of ``q``.\n\n Example::\n\n >>> rowan.conjugate([0.5, 0.5, -0.5, 0.5])\n array([ 0.5, -0.5, 0.5, -0.5])\n \"\"\"\n # Don't use asarray to avoid modifying in place\n conjugate = np.array(q)\n conjugate[..., 1:] *= -1\n return conjugate\n\n\ndef inverse(q):\n r\"\"\"Compute the inverse of an array of quaternions.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Inverses of ``q``.\n\n Example::\n\n >>> rowan.inverse([1, 0, 0, 0])\n array([ 1., -0., -0., -0.])\n \"\"\"\n # Copy input so that we can safely modify in place, ensure float.\n inverses = np.array(q, dtype=float)\n\n normsq = norm(inverses) ** 2\n if np.any(normsq):\n inverses[..., 1:] *= -1\n # Would like to do this in place, but can't guarantee type safety\n inverses[normsq > 0] = inverses[normsq > 0] / normsq[normsq > 0, np.newaxis]\n\n return inverses\n\n\ndef multiply(qi, qj):\n r\"\"\"Multiplies two arrays of quaternions.\n\n Note that quaternion multiplication is generally non-commutative, so the\n first and second set of quaternions must be passed in the correct order.\n\n Args:\n qi ((..., 4) :class:`numpy.ndarray`): Array of left quaternions.\n qj ((..., 4) :class:`numpy.ndarray`): Array of right quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`:\n Element-wise products of ``q`` (obeying broadcasting rules up to the last\n dimension of ``qi`` and ``qj``).\n\n Example::\n\n >>> rowan.multiply([1, 0, 0, 0], [2, 0, 0, 0])\n array([2., 0., 0., 0.])\n \"\"\"\n qi = np.asarray(qi)\n qj = np.asarray(qj)\n\n output = np.empty(np.broadcast(qi, qj).shape)\n\n output[..., 0] = qi[..., 0] * qj[..., 0] - np.sum(\n qi[..., 1:] * qj[..., 1:], axis=-1\n )\n output[..., 1:] = (\n qi[..., 0, np.newaxis] * qj[..., 1:]\n + qj[..., 0, np.newaxis] * qi[..., 1:]\n + np.cross(qi[..., 1:], qj[..., 1:])\n )\n return output\n\n\ndef divide(qi, qj):\n r\"\"\"Divides two arrays of quaternions.\n\n Division is non-commutative; this function returns\n :math:`q_i q_j^{-1}`.\n\n Args:\n qi ((..., 4) :class:`numpy.ndarray`): Dividend quaternions.\n qj ((..., 4) :class:`numpy.ndarray`): Divisor quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`:\n Element-wise quotients of ``q`` (obeying broadcasting rules up to the last\n dimension of ``qi`` and ``qj``).\n\n Example::\n\n >>> rowan.divide([1, 0, 0, 0], [2, 0, 0, 0])\n array([0.5, 0. , 0. , 0. 
])\n \"\"\"\n return multiply(qi, inverse(qj))\n\n\ndef norm(q):\n r\"\"\"Compute the quaternion norm.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (...) :class:`numpy.ndarray`: Norms of ``q``.\n\n Example::\n\n >>> rowan.norm([10, 0, 0, 0])\n 10.0\n \"\"\"\n q = np.asarray(q)\n return np.linalg.norm(q, axis=-1)\n\n\ndef normalize(q):\n r\"\"\"Normalize quaternions.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Normalized versions of ``q``.\n\n Example::\n\n >>> rowan.normalize([10, 0, 0, 0])\n array([1., 0., 0., 0.])\n \"\"\"\n q = np.asarray(q)\n norms = norm(q)\n return q / norms[..., np.newaxis]\n\n\ndef is_unit(q):\n \"\"\"Check if all input quaternions have unit norm.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (...) :class:`numpy.ndarray` of bool:\n Whether or not all inputs are unit quaternions.\n\n Example::\n\n >>> rowan.is_unit([10, 0, 0, 0])\n False\n \"\"\"\n return np.allclose(norm(q), 1)\n\n\ndef _validate_unit(q, msg=\"Arguments must be unit quaternions\"):\n \"\"\"Ensure that all quaternions in q have unit norm.\"\"\"\n if not is_unit(q):\n raise ValueError(msg)\n\n\ndef from_mirror_plane(x, y, z):\n r\"\"\"Generate quaternions from mirror plane equations.\n\n Reflection quaternions can be constructed from the form\n :math:`(0, x, y, z)`, *i.e.* with zero real component. The vector\n :math:`(x, y, z)` is the normal to the mirror plane.\n\n Args:\n x ((...) :class:`numpy.ndarray`): First planar component.\n y ((...) :class:`numpy.ndarray`): Second planar component.\n z ((...) :class:`numpy.ndarray`): Third planar component.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`:\n Quaternions reflecting about the input plane :math:`(x, y, z)`.\n\n Example::\n\n >>> rowan.from_mirror_plane(*(1, 2, 3))\n array([0., 1., 2., 3.])\n \"\"\"\n x, y, z = np.broadcast_arrays(x, y, z)\n q = np.empty(x.shape + (4,))\n q[..., 0] = 0\n q[..., 1] = x\n q[..., 2] = y\n q[..., 3] = z\n\n return q\n\n\ndef _promote_vec(v):\n \"\"\"Promote vectors to their quaternion representation.\"\"\"\n return np.concatenate((np.zeros(v.shape[:-1] + (1,)), v), axis=-1)\n\n\ndef reflect(q, v):\n r\"\"\"Reflect a list of vectors by a corresponding set of quaternions.\n\n For help constructing a mirror plane, see :func:`from_mirror_plane`.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n v ((..., 3) :class:`numpy.ndarray`): Array of vectors.\n\n Returns:\n (..., 3) :class:`numpy.ndarray`:\n The result of reflecting ``v`` using ``q``.\n\n Example::\n\n >>> rowan.reflect([1, 0, 0, 0], [1, 1, 1])\n array([1., 1., 1.])\n \"\"\"\n q = np.asarray(q)\n v = np.asarray(v)\n\n # Convert vector to quaternion representation\n quat_v = _promote_vec(v)\n return multiply(q, multiply(quat_v, q))[..., 1:]\n\n\ndef rotate(q, v):\n r\"\"\"Rotate a list of vectors by a corresponding set of quaternions.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n v ((..., 3) :class:`numpy.ndarray`): Array of vectors.\n\n Returns:\n (..., 3) :class:`numpy.ndarray`:\n The result of rotating ``v`` using ``q``.\n\n Example::\n\n >>> rowan.rotate([1, 0, 0, 0], [1, 1, 1])\n array([1., 1., 1.])\n \"\"\"\n q = np.asarray(q)\n v = np.asarray(v)\n\n # Convert vector to quaternion representation\n quat_v = _promote_vec(v)\n return multiply(q, multiply(quat_v, conjugate(q)))[..., 1:]\n\n\ndef _normalize_vec(v):\n r\"\"\"Normalize vectors.\"\"\"\n v = 
np.asarray(v)\n norms = np.linalg.norm(v, axis=-1)\n return v / norms[..., np.newaxis]\n\n\ndef _vector_bisector(v1, v2):\n r\"\"\"Find the vector bisecting two vectors.\n\n Args:\n v1 ((..., 3) :class:`numpy.ndarray`): First array of vectors.\n v2 ((..., 3) :class:`numpy.ndarray`): Second array of vectors.\n\n Returns:\n (..., 3) :class:`numpy.ndarray`: The vector bisectors.\n \"\"\"\n # Since np.inner and np.dot require manipulating the shapes in ways that\n # might be expensive and may not play nicely with broadcasting, we perform\n # the dot product manually on the broadcasted arrays\n v1_norm, v2_norm = np.broadcast_arrays(_normalize_vec(v1), _normalize_vec(v2))\n ap = np.isclose(np.sum(v1_norm * v2_norm, axis=-1), -1)\n\n if np.any(ap):\n result = np.empty(v1_norm.shape)\n\n # Parallel vectors are fine, only antiparallel vectors cause problems\n not_ap = np.logical_not(ap)\n result[not_ap] = _normalize_vec(v1_norm[not_ap] + v2_norm[not_ap])\n\n # To use cross products to find the normal, we need to choose a unit\n # vector that is also not (anti)parallel to the original. Keep two\n # options available to avoid this case.\n one_vec = np.array([[1, 0, 0]])\n other_one_vec = np.array([[0, 1, 0]])\n cross_element = np.where(\n np.isclose(np.abs(np.dot(v1_norm[ap], one_vec.T)), 1),\n other_one_vec,\n one_vec,\n )\n result[ap] = np.cross(v1_norm[ap], cross_element)\n\n return result\n else:\n return _normalize_vec(v1_norm + v2_norm)\n\n\ndef vector_vector_rotation(v1, v2):\n r\"\"\"Find the quaternion to rotate one vector onto another.\n\n .. note::\n\n Vector-vector rotation is underspecified, with one degree of freedom\n possible in the resulting quaternion. This method chooses to rotate by\n :math:`\\pi` around the vector bisecting v1 and v2.\n\n Args:\n v1 ((..., 3) :class:`numpy.ndarray`): Array of vectors to rotate.\n v2 ((..., 3) :class:`numpy.ndarray`): Array of vector to rotate onto.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Quaternions that rotate ``v1`` onto ``v2``.\n\n Example::\n\n >>> rowan.vector_vector_rotation([1, 0, 0], [0, 1, 0])\n array([6.12323400e-17, 7.07106781e-01, 7.07106781e-01, 0.00000000e+00])\n \"\"\"\n v1 = np.asarray(v1)\n v2 = np.asarray(v2)\n return from_axis_angle(_vector_bisector(v1, v2), np.pi)\n\n\ndef from_euler(alpha, beta, gamma, convention=\"zyx\", axis_type=\"intrinsic\"):\n r\"\"\"Convert Euler angles to quaternions.\n\n For generality, the rotations are computed by composing a sequence of\n quaternions corresponding to axis-angle rotations. While more efficient\n implementations are possible, this method was chosen to prioritize\n flexibility since it works for essentially arbitrary Euler angles as\n long as intrinsic and extrinsic rotations are not intermixed.\n\n Args:\n alpha ((...) :class:`numpy.ndarray`):\n Array of :math:`\\alpha` values in radians.\n beta ((...) :class:`numpy.ndarray`):\n Array of :math:`\\beta` values in radians.\n gamma ((...) 
:class:`numpy.ndarray`):\n Array of :math:`\\gamma` values in radians.\n convention (str):\n One of the 12 valid conventions xzx, xyx, yxy, yzy, zyz, zxz, xzy, xyz, yxz,\n yzx, zyx, zxy.\n axes (str):\n Whether to use extrinsic or intrinsic rotations.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: Quaternions corresponding to the input angles.\n\n Example::\n\n >>> rowan.from_euler(0.3, 0.5, 0.7)\n array([0.91262714, 0.29377717, 0.27944389, 0.05213241])\n \"\"\"\n angles = np.broadcast_arrays(alpha, beta, gamma)\n\n convention = convention.lower()\n\n if len(convention) > 3 or (set(convention) - set(\"xyz\")):\n raise ValueError(\n \"All acceptable conventions must be 3 \\\ncharacter strings composed only of x, y, and z\"\n )\n\n basis_axes = {\n \"x\": np.array([1, 0, 0]),\n \"y\": np.array([0, 1, 0]),\n \"z\": np.array([0, 0, 1]),\n }\n # Temporary method to ensure shapes conform\n for ax, vec in basis_axes.items():\n basis_axes[ax] = np.broadcast_to(vec, angles[0].shape + (vec.shape[-1],))\n\n # Split by convention, the easiest\n rotations = []\n if axis_type == \"extrinsic\":\n # Loop over the axes and add each rotation\n for i, char in enumerate(convention):\n ax = basis_axes[char]\n rotations.append(from_axis_angle(ax, angles[i]))\n elif axis_type == \"intrinsic\":\n for i, char in enumerate(convention):\n ax = basis_axes[char]\n rotations.append(from_axis_angle(ax, angles[i]))\n # Rotate the bases as well\n for key, value in basis_axes.items():\n basis_axes[key] = rotate(rotations[-1], value)\n else:\n raise ValueError(\"Only valid axis_types are intrinsic and extrinsic\")\n\n # Compose the total rotation\n final_rotation = np.broadcast_to(np.array([1, 0, 0, 0]), rotations[0].shape)\n for q in rotations:\n final_rotation = multiply(q, final_rotation)\n\n return final_rotation\n\n\ndef to_euler(q, convention=\"zyx\", axis_type=\"intrinsic\"): # noqa: C901\n r\"\"\"Convert quaternions to Euler angles.\n\n Euler angles are returned in the sequence provided, so in, *e.g.*,\n the default case ('zyx'), the angles returned are for a rotation\n :math:`Z(\\alpha) Y(\\beta) X(\\gamma)`.\n\n .. note::\n\n In all cases, the :math:`\\alpha` and :math:`\\gamma` angles are\n between :math:`\\pm \\pi`. For proper Euler angles, :math:`\\beta`\n is between :math:`0` and :math:`\\pi` degrees. For Tait-Bryan\n angles, :math:`\\beta` lies between :math:`\\pm\\pi/2`.\n\n For simplicity, quaternions are converted to matrices, which are\n then converted to their Euler angle representations. All equations\n for rotations are derived by considering compositions of the `three\n elemental rotations about the three Cartesian axes\n <https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations>`_. 
A\n Mathematica notebook describing this process can be found in the\n `misc subdirectory of the repository\n <https://github.com/glotzerlab/rowan/blob/master/misc/Euler.nb>`__.\n\n Extrinsic rotations are represented by matrix multiplications in\n the proper order, so :math:`z-y-x` is represented by the\n multiplication :math:`XYZ` so that the system is rotated first\n about :math:`Z`, then about :math:`Y`, then finally :math:`X`.\n For intrinsic rotations, the order of rotations is reversed,\n meaning that it matches the order in which the matrices actually\n appear *i.e.* the :math:`z-y'-x''` convention (yaw, pitch, roll)\n corresponds to the multiplication of matrices :math:`ZYX`.\n For proof of the relationship between intrinsic and extrinsic\n rotations, see the `Wikipedia page on Davenport chained rotations\n <https://en.wikipedia.org/wiki/Davenport_chained_rotations>`_.\n\n For more information, see the Wikipedia page for\n `Euler angles <https://en.wikipedia.org/wiki/Euler_angles>`_\n (specifically the section on converting between representations).\n\n .. warning::\n\n Euler angles are a highly problematic representation for a number of\n reasons, not least of which is the large number of possible conventions\n and their relative imprecision when compared to using quaternions (or\n axis-angle representations). If possible, you should avoid Euler angles\n and work with quaternions instead. If Euler angles are required, note\n that they are susceptible to `gimbal lock\n <https://en.wikipedia.org/wiki/Gimbal_lock>`_, which leads to ambiguity\n in the representation of a given rotation. To address this issue, in\n cases where gimbal lock arises, :func:`~.to_euler` adopts the\n convention that :math:`\\gamma=0` and represents the rotation entirely\n in terms of :math:`\\beta` and :math:`\\alpha`.\n\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`):\n Quaternions to transform.\n convention (str):\n One of the 6 valid conventions zxz, xyx, yzy, zyz, xzx, yxy.\n axes (str):\n Whether to use extrinsic or intrinsic.\n\n Returns:\n (..., 3) :class:`numpy.ndarray`:\n Euler angles :math:`(\\alpha, \\beta, \\gamma)` corresponding to ``q``.\n\n Example::\n\n >>> import numpy as np\n >>> rands = np.random.rand(100, 3)\n >>> alpha, beta, gamma = rands.T\n >>> ql = rowan.from_euler(alpha, beta, gamma)\n >>> alpha_return, beta_return, gamma_return = np.split(\n ... rowan.to_euler(ql), 3, axis = 1)\n >>> assert(np.allclose(alpha_return.flatten(), alpha))\n >>> assert(np.allclose(beta_return.flatten(), beta))\n >>> assert(np.allclose(gamma_return.flatten(), gamma))\n \"\"\"\n q = np.asarray(q)\n _validate_unit(q)\n atol = 1e-3\n\n try:\n # Due to minor numerical imprecision, the to_matrix function could\n # generate a (very slightly) nonorthogonal matrix (e.g. with a norm of\n # 1 + 2e-8). That is sufficient to throw off the trigonometric\n # functions, so it's worthwhile to explicitly clip for safety,\n # especially since we've already checked the quaternion norm.\n mats = np.clip(to_matrix(q), -1, 1)\n except ValueError:\n raise ValueError(\"Not all quaternions in q are unit quaternions.\")\n\n # For intrinsic angles, the matrix must be constructed in reverse order\n # e.g. Z(\\alpha)Y'(\\beta)Z''(\\gamma) (where primes denote the rotated\n # frames) becomes the extrinsic rotation Z(\\gamma)Y(\\beta)Z(\\alpha). 
Simply\n # for easier readability of order, matrices are constructed for the\n # intrinsic angle ordering and just reversed for extrinsic.\n if axis_type == \"extrinsic\":\n convention = convention[::-1]\n elif not axis_type == \"intrinsic\":\n raise ValueError(\"The axis type must be either extrinsic or intrinsic\")\n\n # We have to hardcode the different convention possibilities since they all\n # result in different matrices according to the rotation order. In all\n # possible compositions, there are cases where, given some 0 elements in\n # the matrix, the simplest combination of matrix elements will give the\n # wrong solution. In those cases, we have to use other parts of the\n # matrix. In those cases, we have to be much more careful about signs,\n # because there are multiple places where negatives can come into play. Due\n # to gimbal lock, the alpha and gamma angles are no longer independent in\n # that case. By convention, we set gamma to 0 and solve for alpha in those\n # cases.\n\n # Classical Euler angles\n if convention == \"xzx\":\n beta = np.arccos(mats[..., 0, 0])\n multiplier = mats[..., 0, 0] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 0, 2], -mats[..., 0, 1]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 2, 0], mats[..., 1, 0]))\n zero_terms = np.arctan2(-multiplier * mats[..., 1, 2], mats[..., 2, 2])\n elif convention == \"xyx\":\n beta = np.arccos(mats[..., 0, 0])\n multiplier = mats[..., 0, 0] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 0, 1], mats[..., 0, 2]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 1, 0], -mats[..., 2, 0]))\n zero_terms = np.arctan2(multiplier * mats[..., 2, 1], mats[..., 1, 1])\n elif convention == \"yxy\":\n beta = np.arccos(mats[..., 1, 1])\n multiplier = mats[..., 1, 1] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 1, 0], -mats[..., 1, 2]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 0, 1], mats[..., 2, 1]))\n zero_terms = np.arctan2(-multiplier * mats[..., 2, 0], mats[..., 0, 0])\n elif convention == \"yzy\":\n beta = np.arccos(mats[..., 1, 1])\n multiplier = mats[..., 1, 1] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 1, 2], mats[..., 1, 0]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 2, 1], -mats[..., 0, 1]))\n zero_terms = np.arctan2(multiplier * mats[..., 0, 2], mats[..., 2, 2])\n elif convention == \"zyz\":\n beta = np.arccos(mats[..., 2, 2])\n multiplier = mats[..., 2, 2] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 2, 1], -mats[..., 2, 0]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 1, 2], mats[..., 0, 2]))\n zero_terms = np.arctan2(-multiplier * mats[..., 0, 1], mats[..., 1, 1])\n elif convention == \"zxz\":\n beta = np.arccos(mats[..., 2, 2])\n multiplier = mats[..., 2, 2] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.sin(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 2, 0], mats[..., 2, 1]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 0, 2], -mats[..., 1, 2]))\n zero_terms = np.arctan2(multiplier * 
mats[..., 1, 0], mats[..., 0, 0])\n # Tait-Bryan angles\n elif convention == \"xzy\":\n beta = np.arcsin(-mats[..., 0, 1])\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 0, 2], mats[..., 0, 0]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 2, 1], mats[..., 1, 1]))\n zero_terms = np.arctan2(-mats[..., 1, 2], mats[..., 2, 2])\n elif convention == \"xyz\":\n beta = np.arcsin(mats[..., 0, 2])\n multiplier = mats[..., 0, 2] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(-mats[..., 0, 1], mats[..., 0, 0]))\n alpha = np.where(where_zero, 0, np.arctan2(-mats[..., 1, 2], mats[..., 2, 2]))\n zero_terms = np.arctan2(multiplier * mats[..., 2, 1], mats[..., 1, 1])\n elif convention == \"yxz\":\n beta = np.arcsin(-mats[..., 1, 2])\n multiplier = mats[..., 1, 2] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 1, 0], mats[..., 1, 1]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 0, 2], mats[..., 2, 2]))\n zero_terms = np.arctan2(-multiplier * mats[..., 2, 0], mats[..., 0, 0])\n elif convention == \"yzx\":\n beta = np.arcsin(mats[..., 1, 0])\n multiplier = mats[..., 1, 0] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(-mats[..., 1, 2], mats[..., 1, 1]))\n alpha = np.where(where_zero, 0, np.arctan2(-mats[..., 2, 0], mats[..., 0, 0]))\n zero_terms = np.arctan2(multiplier * mats[..., 0, 2], mats[..., 2, 2])\n elif convention == \"zyx\":\n beta = np.arcsin(-mats[..., 2, 0])\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(mats[..., 2, 1], mats[..., 2, 2]))\n alpha = np.where(where_zero, 0, np.arctan2(mats[..., 1, 0], mats[..., 0, 0]))\n zero_terms = np.arctan2(-mats[..., 0, 1], mats[..., 1, 1])\n elif convention == \"zxy\":\n beta = np.arcsin(mats[..., 2, 1])\n multiplier = mats[..., 2, 1] if axis_type == \"extrinsic\" else 1\n where_zero = np.isclose(np.cos(beta), 0, atol=atol)\n\n gamma = np.where(where_zero, 0, np.arctan2(-mats[..., 2, 0], mats[..., 2, 2]))\n alpha = np.where(where_zero, 0, np.arctan2(-mats[..., 0, 1], mats[..., 1, 1]))\n zero_terms = np.arctan2(multiplier * mats[..., 1, 0], mats[..., 0, 0])\n else:\n raise ValueError(\"Unknown convention selected!\")\n\n # For extrinsic, swap back alpha and gamma.\n if axis_type == \"extrinsic\":\n tmp = alpha\n alpha = gamma\n gamma = tmp\n\n # By convention, the zero terms that we calculate are always based on\n # setting gamma to zero and applying to alpha. We assign them after the\n # fact to enable the memcopy-free swap of alpha and gamma for extrinsic\n # angles. For Python 2 compatibility, we need to index appropriately.\n try:\n alpha[where_zero] = zero_terms[where_zero]\n except IndexError:\n # This is necessary for Python 2 compatibility and limitations with the\n # indexing behavior. 
Since the only possible case is a single set of\n # inputs, we can just skip any indexing and overwrite directly if\n # needed.\n if where_zero:\n alpha = zero_terms\n return np.stack((alpha, beta, gamma), axis=-1)\n\n\ndef from_matrix(mat, require_orthogonal=True):\n r\"\"\"Convert the rotation matrices mat to quaternions.\n\n This method uses the algorithm described by Bar-Itzhack in [Itzhack00]_.\n The idea is to construct a matrix K whose largest eigenvalue corresponds\n to the desired quaternion. One of the strengths of the algorithm is that\n for nonorthogonal matrices it gives the closest quaternion representation\n rather than failing outright.\n\n .. [Itzhack00] Itzhack Y. Bar-Itzhack. \"New Method for Extracting the\n Quaternion from a Rotation Matrix\", Journal of Guidance, Control, and\n Dynamics, Vol. 23, No. 6 (2000), pp. 1085-1087\n https://doi.org/10.2514/2.4654\n\n Args:\n mat ((..., 3, 3) :class:`numpy.ndarray`): An array of rotation matrices.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: The corresponding rotation quaternions.\n\n Example::\n\n >>> rowan.from_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n array([ 1., -0., -0., -0.])\n \"\"\"\n mat = np.asarray(mat)\n if require_orthogonal and not np.allclose(np.linalg.det(mat), 1):\n raise ValueError(\n \"Not all of your matrices are orthogonal. \\\nPlease ensure that there are no improper rotations. \\\nIf this was intentional, set require_orthogonal to \\\nFalse when calling this function.\"\n )\n\n K = np.zeros(mat.shape[:-2] + (4, 4))\n K[..., 0, 0] = mat[..., 0, 0] - mat[..., 1, 1] - mat[..., 2, 2]\n K[..., 0, 1] = mat[..., 1, 0] + mat[..., 0, 1]\n K[..., 0, 2] = mat[..., 2, 0] + mat[..., 0, 2]\n K[..., 0, 3] = mat[..., 1, 2] - mat[..., 2, 1]\n K[..., 1, 0] = mat[..., 1, 0] + mat[..., 0, 1]\n K[..., 1, 1] = mat[..., 1, 1] - mat[..., 0, 0] - mat[..., 2, 2]\n K[..., 1, 2] = mat[..., 2, 1] + mat[..., 1, 2]\n K[..., 1, 3] = mat[..., 2, 0] - mat[..., 0, 2]\n K[..., 2, 0] = mat[..., 2, 0] + mat[..., 0, 2]\n K[..., 2, 1] = mat[..., 2, 1] + mat[..., 1, 2]\n K[..., 2, 2] = mat[..., 2, 2] - mat[..., 0, 0] - mat[..., 1, 1]\n K[..., 2, 3] = mat[..., 0, 1] - mat[..., 1, 0]\n K[..., 3, 0] = mat[..., 1, 2] - mat[..., 2, 1]\n K[..., 3, 1] = mat[..., 2, 0] - mat[..., 0, 2]\n K[..., 3, 2] = mat[..., 0, 1] - mat[..., 1, 0]\n K[..., 3, 3] = mat[..., 0, 0] + mat[..., 1, 1] + mat[..., 2, 2]\n K = K / 3.0\n\n _, v = np.linalg.eigh(K)\n # The conventions in the paper are very confusing for quaternions in terms\n # of the order of the components\n return np.concatenate((v[..., -1, -1, np.newaxis], -v[..., :-1, -1]), axis=-1)\n\n\ndef to_matrix(q, require_unit=True):\n r\"\"\"Convert quaternions into rotation matrices.\n\n Uses the conversion described on `Wikipedia\n <https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix>`_.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): An array of quaternions.\n\n Returns:\n (..., 3, 3) :class:`numpy.ndarray`: The corresponding rotation matrices.\n\n Example::\n\n >>> rowan.to_matrix([1, 0, 0, 0])\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n q = np.asarray(q)\n\n s = norm(q)\n if np.any(s == 0.0):\n raise ZeroDivisionError(\"At least one element of q has approximately zero norm\")\n elif require_unit and not np.allclose(s, 1.0):\n raise ValueError(\n \"Not all quaternions in q are unit quaternions. 
\\\nIf this was intentional, please set require_unit to False when \\\ncalling this function.\"\n )\n m = np.empty(q.shape[:-1] + (3, 3))\n s **= -1.0 # For consistency with Wikipedia notation\n m[..., 0, 0] = 1.0 - 2 * s * (q[..., 2] ** 2 + q[..., 3] ** 2)\n m[..., 0, 1] = 2 * (q[..., 1] * q[..., 2] - q[..., 3] * q[..., 0])\n m[..., 0, 2] = 2 * (q[..., 1] * q[..., 3] + q[..., 2] * q[..., 0])\n m[..., 1, 0] = 2 * (q[..., 1] * q[..., 2] + q[..., 3] * q[..., 0])\n m[..., 1, 1] = 1.0 - 2 * (q[..., 1] ** 2 + q[..., 3] ** 2)\n m[..., 1, 2] = 2 * (q[..., 2] * q[..., 3] - q[..., 1] * q[..., 0])\n m[..., 2, 0] = 2 * (q[..., 1] * q[..., 3] - q[..., 2] * q[..., 0])\n m[..., 2, 1] = 2 * (q[..., 2] * q[..., 3] + q[..., 1] * q[..., 0])\n m[..., 2, 2] = 1.0 - 2 * (q[..., 1] ** 2 + q[..., 2] ** 2)\n return m\n\n\ndef from_axis_angle(axes, angles):\n r\"\"\"Find quaternions to rotate a specified angle about a specified axis.\n\n All angles are assumed to be **counterclockwise** rotations about the axis.\n\n Args:\n axes ((..., 3) :class:`numpy.ndarray`):\n An array of vectors (the axes).\n angles (float or (..., 1) :class:`numpy.ndarray`):\n An array of angles in radians. Will be broadcasted to match shape of axes\n as needed.\n\n Returns:\n (..., 4) :class:`numpy.ndarray`: The corresponding rotation quaternions.\n\n Example::\n\n >>> import numpy as np\n >>> rowan.from_axis_angle([[1, 0, 0]], np.pi/3)\n array([[0.8660254, 0.5 , 0. , 0. ]])\n \"\"\"\n axes = np.asarray(axes)\n\n # First reshape angles and compute the half angle\n bc = np.broadcast(angles, axes[..., 0])\n angles = np.broadcast_to(angles, bc.shape)[..., np.newaxis]\n axes = np.broadcast_to(axes, bc.shape + (3,))\n ha = angles / 2.0\n\n # Normalize the vector\n u = _normalize_vec(axes)\n\n # Compute the components of the quaternions\n scalar_comp = np.cos(ha)\n vec_comp = np.sin(ha) * u\n\n return np.concatenate((scalar_comp, vec_comp), axis=-1)\n\n\ndef to_axis_angle(q):\n r\"\"\"Convert the quaternions in q to axis-angle representations.\n\n The output angles are **counterclockwise** rotations about the axis.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): An array of quaternions.\n\n Returns:\n tuple[(..., 3) :class:`numpy.ndarray`, (...) :class:`numpy.ndarray`]:\n The axes and the angles (in radians).\n\n Example::\n\n >>> rowan.to_axis_angle([[1, 0, 0, 0]])\n (array([[0., 0., 0.]]), array([0.]))\n \"\"\"\n q = np.asarray(q)\n _validate_unit(q)\n\n angles = 2 * np.atleast_1d(np.arccos(q[..., 0]))\n sines = np.sin(angles / 2)\n # Avoid divide by zero issues; these values will not be used\n sines[sines == 0] = 1\n axes = np.where(\n angles[..., np.newaxis] != 0, q[..., 1:] / sines[..., np.newaxis], 0\n )\n\n return axes, angles\n\n\ndef equal(p, q):\n r\"\"\"Check whether two sets of quaternions are equal.\n\n This function is a simple wrapper that checks array\n equality and then aggregates along the quaternion axis.\n\n Args:\n p ((..., 4) :class:`numpy.ndarray`): First array of quaternions.\n q ((..., 4) :class:`numpy.ndarray`): Second array of quaternions.\n\n Returns:\n (...) 
:class:`numpy.ndarray` of bool: Whether ``p`` and ``q`` are equal.\n\n Example::\n\n >>> rowan.equal([1, 0, 0, 0], [1, 0, 0, 0])\n True\n \"\"\"\n return np.all(p == q, axis=-1)\n\n\ndef not_equal(p, q):\n r\"\"\"Check whether two sets of quaternions are not equal.\n\n This function is a simple wrapper that checks array\n equality and then aggregates along the quaternion axis.\n\n Args:\n p ((..., 4) :class:`numpy.ndarray`): First array of quaternions.\n q ((..., 4) :class:`numpy.ndarray`): Second array of quaternions.\n\n Returns:\n (...) :class:`numpy.ndarray` of bool: Whether ``p`` and ``q`` are unequal.\n\n Example::\n\n >>> rowan.not_equal([-1, 0, 0, 0], [1, 0, 0, 0])\n True\n \"\"\"\n return np.any(p != q, axis=-1)\n\n\ndef isnan(q):\n r\"\"\"Test element-wise for NaN quaternions.\n\n A quaternion is defined as NaN if any elements are NaN.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (...) :class:`numpy.ndarray` of bool: Whether ``q`` is NaN.\n\n Example::\n\n >>> import numpy as np\n >>> rowan.isnan([np.nan, 0, 0, 0])\n True\n \"\"\"\n return np.any(np.isnan(q), axis=-1)\n\n\ndef isinf(q):\n r\"\"\"Test element-wise for infinite quaternions.\n\n A quaternion is defined as infinite if any elements are infinite.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions\n\n Returns:\n (...) :class:`numpy.ndarray` of bool: Whether ``q`` is infinite.\n\n Example::\n\n >>> import numpy as np\n >>> rowan.isinf([np.nan, 0, 0, 0])\n False\n \"\"\"\n return np.any(np.isinf(q), axis=-1)\n\n\ndef isfinite(q):\n r\"\"\"Test element-wise for finite quaternions.\n\n A quaternion is defined as finite if all elements are finite.\n\n Args:\n q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.\n\n Returns:\n (...) :class:`numpy.ndarray` of bool: Whether ``q`` is finite.\n\n Example::\n\n >>> rowan.isfinite([1, 0, 0, 0])\n True\n \"\"\"\n return np.all(np.isfinite(q), axis=-1)\n\n\ndef allclose(p, q, **kwargs):\n r\"\"\"Check whether two sets of quaternions are all close.\n\n This is a direct wrapper of the corresponding NumPy function.\n\n Args:\n p ((..., 4) :class:`numpy.ndarray`): First array of quaternions.\n q ((..., 4) :class:`numpy.ndarray`): Second array of quaternions.\n \\*\\*kwargs: Keyword arguments to pass to np.allclose.\n\n Returns:\n bool: Whether all of ``p`` and ``q`` are close.\n\n Example::\n\n >>> rowan.allclose([1, 0, 0, 0], [1, 0, 0, 0])\n True\n \"\"\"\n return np.allclose(p, q, **kwargs)\n\n\ndef isclose(p, q, **kwargs):\n r\"\"\"Element-wise check of whether two sets of quaternions are close.\n\n This function is a simple wrapper that checks using the\n corresponding NumPy function and then aggregates along\n the quaternion axis.\n\n Args:\n p ((..., 4) :class:`numpy.ndarray`): First array of quaternions.\n q ((..., 4) :class:`numpy.ndarray`): Second array of quaternions.\n \\*\\*kwargs: Keyword arguments to pass to np.isclose.\n\n Returns:\n (...) :class:`numpy.ndarray` of bool:\n Whether ``p`` and ``q`` are close element-wise.\n\n Example::\n\n >>> rowan.isclose([[1, 0, 0, 0]], [[1, 0, 0, 0]])\n array([ True])\n \"\"\"\n return np.all(np.isclose(p, q, **kwargs), axis=-1)\n" ]
[ [ "numpy.isclose", "numpy.arccos", "numpy.dot", "numpy.exp", "numpy.where", "numpy.cos", "numpy.broadcast_to", "numpy.concatenate", "numpy.sin", "numpy.linalg.norm", "numpy.empty", "numpy.log", "numpy.arcsin", "numpy.linalg.eigh", "numpy.isfinite", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.linalg.det", "numpy.allclose", "numpy.stack", "numpy.arctan2", "numpy.logical_not", "numpy.isinf", "numpy.isnan", "numpy.asarray", "numpy.broadcast_arrays", "numpy.broadcast", "numpy.sum", "numpy.any", "numpy.all" ] ]
maria-cardona/Hippocampal-Volume-Quantification-in-Alzheimer-s-Progression
[ "e4ec762ca78f63dd15850f638c0e6a04e6ab3ba4" ]
[ "section 3/src/inference_dcm.py" ]
[ "\"\"\"\nHere we do inference on a DICOM volume, constructing the volume first, and then sending it to the\nclinical archive\n\nThis code will do the following:\n 1. Identify the series to run HippoCrop.AI algorithm on from a folder containing multiple studies\n 2. Construct a NumPy volume from a set of DICOM files\n 3. Run inference on the constructed volume\n 4. Create report from the inference\n 5. Call a shell script to push report to the storage archive\n\"\"\"\n\nimport os\nimport sys\nimport datetime\nimport time\nimport shutil\nimport subprocess\n\nimport numpy as np\nimport pydicom\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nfrom inference.UNetInferenceAgent import UNetInferenceAgent\n\ndef load_dicom_volume_as_numpy_from_list(dcmlist):\n \"\"\"Loads a list of PyDicom objects a Numpy array.\n Assumes that only one series is in the array\n\n Arguments:\n dcmlist {list of PyDicom objects} -- path to directory\n\n Returns:\n tuple of (3D volume, header of the 1st image)\n \"\"\"\n\n # In the real world you would do a lot of validation here\n slices = [np.flip(dcm.pixel_array).T for dcm in sorted(dcmlist, key=lambda dcm: dcm.InstanceNumber)]\n\n # Make sure that you have correctly constructed the volume from your axial slices!\n hdr = dcmlist[0]\n\n # We return header so that we can inspect metadata properly.\n # Since for our purposes we are interested in \"Series\" header, we grab header of the\n # first file (assuming that any instance-specific values will be ighored - common approach)\n # We also zero-out Pixel Data since the users of this function are only interested in metadata\n hdr.PixelData = None\n return (np.stack(slices, 2), hdr)\n\ndef get_predicted_volumes(pred):\n \"\"\"Gets volumes of two hippocampal structures from the predicted array\n\n Arguments:\n pred {Numpy array} -- array with labels. Assuming 0 is bg, 1 is anterior, 2 is posterior\n\n Returns:\n A dictionary with respective volumes\n \"\"\"\n\n # TASK: Compute the volume of your hippocampal prediction\n # <YOUR CODE HERE>\n volume_ant = np.sum(pred == 1)\n volume_post = np.sum(pred==2)\n total_volume = volume_ant + volume_post\n\n return {\"anterior\": volume_ant, \"posterior\": volume_post, \"total\": total_volume}\n\ndef create_report(inference, header, orig_vol, pred_vol):\n \"\"\"Generates an image with inference report\n\n Arguments:\n inference {Dictionary} -- dict containing anterior, posterior and full volume values\n header {PyDicom Dataset} -- DICOM header\n orig_vol {Numpy array} -- original volume\n pred_vol {Numpy array} -- predicted label\n\n Returns:\n PIL image\n \"\"\"\n\n # The code below uses PIL image library to compose an RGB image that will go into the report\n # A standard way of storing measurement data in DICOM archives is creating such report and\n # sending them on as Secondary Capture IODs (http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_A.8.html)\n # Essentially, our report is just a standard RGB image, with some metadata, packed into \n # DICOM format. \n\n pimg = Image.new(\"RGB\", (1000, 1000))\n draw = ImageDraw.Draw(pimg)\n\n header_font = ImageFont.truetype(\"src/assets/Roboto-Regular.ttf\", size=40)\n main_font = ImageFont.truetype(\"src/assets/Roboto-Regular.ttf\", size=20)\n\n slice_nums = [orig_vol.shape[2]//3, orig_vol.shape[2]//2, orig_vol.shape[2]*3//4] # is there a better choice?\n\n # TASK: Create the report here and show information that you think would be relevant to\n # clinicians. 
A sample code is provided below, but feel free to use your creative \n # genius to make if shine. After all, the is the only part of all our machine learning \n # efforts that will be visible to the world. The usefulness of your computations will largely\n # depend on how you present them.\n draw.text((10, 0), \"HippoVolume.AI\", (255, 255, 255), font=header_font)\n draw.multiline_text((10, 90),\n f\"Patient ID: {header.PatientID} \\n \\\n Study Description : {header.StudyDescription}\\n \\\n Series Description: {header.SeriesDescription}\\n \\\n Modality: {header.Modality}\\n \\\n Image Type: {header.ImageType}\\n \\\n Anterior Volume: {inference['anterior']}\\n \\\n Posterior Volume: {inference['posterior']}\\n \\\n Total Volume: {inference['total']}\\n \\\n Predictions and Axial Slices: {slice_nums}\\n\",\n (255, 255, 255), font=main_font)\n\n # STAND-OUT SUGGESTION:\n # In addition to text data in the snippet above, can you show some images?\n # Think, what would be relevant to show? Can you show an overlay of mask on top of original data?\n # Hint: here's one way to convert a numpy array into a PIL image and draw it inside our pimg object:\n #\n # Create a PIL image from array:\n # Numpy array needs to flipped, transposed and normalized to a matrix of values in the range of [0..255]\n # nd_img = np.flip((slice/np.max(slice))*0xff).T.astype(np.uint8)\n # This is how you create a PIL image from numpy array\n # pil_i = Image.fromarray(nd_img, mode=\"L\").convert(\"RGBA\").resize(<dimensions>)\n # Paste the PIL image into our main report image object (pimg)\n # pimg.paste(pil_i, box=(10, 280))\n slice = orig_vol[0, :, :]\n nd_img = np.flip((slice/np.max(slice))*0xff).T.astype(np.uint8)\n pil_i = Image.fromarray(nd_img, mode=\"L\").convert(\n \"RGBA\").resize((500, 500))\n pimg.paste(pil_i, box=(50, 500))\n \n return pimg\n\ndef save_report_as_dcm(header, report, path):\n \"\"\"Writes the supplied image as a DICOM Secondary Capture file\n\n Arguments:\n header {PyDicom Dataset} -- original DICOM file header\n report {PIL image} -- image representing the report\n path {Where to save the report}\n\n Returns:\n N/A\n \"\"\"\n\n # Code below creates a DICOM Secondary Capture instance that will be correctly\n # interpreted by most imaging viewers including our OHIF\n # The code here is complete as it is unlikely that as a data scientist you will \n # have to dive that deep into generating DICOMs. However, if you still want to understand\n # the subject, there are some suggestions below\n\n # Set up DICOM metadata fields. 
Most of them will be the same as original file header\n out = pydicom.Dataset(header)\n\n out.file_meta = pydicom.Dataset()\n out.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian\n\n # STAND OUT SUGGESTION: \n # If you want to understand better the generation of valid DICOM, remove everything below\n # and try writing your own DICOM generation code from scratch.\n # Refer to this part of the standard to see what are the requirements for the valid\n # Secondary Capture IOD: http://dicom.nema.org/medical/dicom/2019e/output/html/part03.html#sect_A.8\n # The Modules table (A.8-1) contains a list of modules with a notice which ones are mandatory (M)\n # and which ones are conditional (C) and which ones are user-optional (U)\n # Note that we are building an RGB image which would have three 8-bit samples per pixel\n # Also note that writing code that generates valid DICOM has a very calming effect\n # on mind and body :)\n\n out.is_little_endian = True\n out.is_implicit_VR = False\n\n # We need to change class to Secondary Capture\n out.SOPClassUID = \"1.2.840.10008.5.1.4.1.1.7\"\n out.file_meta.MediaStorageSOPClassUID = out.SOPClassUID\n\n # Our report is a separate image series of one image\n out.SeriesInstanceUID = pydicom.uid.generate_uid()\n out.SOPInstanceUID = pydicom.uid.generate_uid()\n out.file_meta.MediaStorageSOPInstanceUID = out.SOPInstanceUID\n out.Modality = \"OT\" # Other\n out.SeriesDescription = \"HippoVolume.AI\"\n\n out.Rows = report.height\n out.Columns = report.width\n\n out.ImageType = r\"DERIVED\\PRIMARY\\AXIAL\" # We are deriving this image from patient data\n out.SamplesPerPixel = 3 # we are building an RGB image.\n out.PhotometricInterpretation = \"RGB\"\n out.PlanarConfiguration = 0 # means that bytes encode pixels as R1G1B1R2G2B2... 
as opposed to R1R2R3...G1G2G3...\n out.BitsAllocated = 8 # we are using 8 bits/pixel\n out.BitsStored = 8\n out.HighBit = 7\n out.PixelRepresentation = 0\n\n # Set time and date\n dt = datetime.date.today().strftime(\"%Y%m%d\")\n tm = datetime.datetime.now().strftime(\"%H%M%S\")\n out.StudyDate = dt\n out.StudyTime = tm\n out.SeriesDate = dt\n out.SeriesTime = tm\n\n out.ImagesInAcquisition = 1\n\n # We empty these since most viewers will then default to auto W/L\n out.WindowCenter = \"\"\n out.WindowWidth = \"\"\n\n # Data imprinted directly into image pixels is called \"burned in annotation\"\n out.BurnedInAnnotation = \"YES\"\n\n out.PixelData = report.tobytes()\n\n pydicom.filewriter.dcmwrite(path, out, write_like_original=False)\n\ndef get_series_for_inference(path):\n \"\"\"Reads multiple series from one folder and picks the one\n to run inference on.\n\n Arguments:\n path {string} -- location of the DICOM files\n\n Returns:\n Numpy array representing the series\n \"\"\"\n\n # Here we are assuming that path is a directory that contains a full study as a collection\n # of files\n # We are reading all files into a list of PyDicom objects so that we can filter them later\n dicoms = [pydicom.dcmread(os.path.join(path, f)) for f in os.listdir(path)]\n# dicoms = []\n# for dir , subdirs, files in os.walk(path):\n# for subdir in subdirs:\n# dicoms.extend([pydicom.dcmread(os.path.join(path, subdir, f)) for f in os.listdir(os.path.join(path, subdir))])\n\n # TASK: create a series_for_inference variable that will contain a list of only \n # those PyDicom objects that represent files that belong to the series that you \n # will run inference on.\n # It is important to note that radiological modalities most often operate in terms\n # of studies, and it will most likely be on you to establish criteria for figuring \n # out which one of the multiple series sent by the scanner is the one you need to feed to \n # your algorithm. In our case it's rather easy - we have reached an agreement with \n # people who configured the HippoCrop tool and they label the output of their tool in a \n # certain way. Can you figure out which is that? 
\n # Hint: inspect the metadata of HippoCrop series\n\n # <YOUR CODE HERE>\n \n series_for_inference = []\n \n# for i in range(len(dicoms)):\n# # print(dicoms[i], dicoms[i].SeriesDescription)\n# # print(dicoms[i].SeriesDescription)\n# if (dicoms[i].SeriesDescription == \"HippoCrop\"):\n# # print(dicoms[i].SeriesDescription)\n# series_for_inference.append(dicoms[i])\n\n \n for dicom in dicoms:\n try:\n if dicom.SeriesDescription == 'HippoCrop':\n series_for_inference.append(dicom)\n except KeyError:\n continue\n \n# series_path = [dir for dir, subdirs, files in os.walk(path) if 'HCropVolume' in dir]\n# chosen_path = np.random.choice(series_path,size=int(len(series_path) > 0))\n# print(chosen_path)\n# series_for_inference = [pydicom.dcmread(os.path.join(chosen_path, f)) for f in os.listdir(chosen_path)]\n\n\n # Check if there are more than one series (using set comprehension).\n if len({f.SeriesInstanceUID for f in series_for_inference}) != 1:\n print(\"Error: can not figure out what series to run inference on\")\n return []\n\n return series_for_inference\n\ndef os_command(command):\n # Comment this if running under Windows\n sp = subprocess.Popen([\"/bin/bash\", \"-i\", \"-c\", command])\n sp.communicate()\n\n # Uncomment this if running under Windows\n os.system(command)\n\nif __name__ == \"__main__\":\n # This code expects a single command line argument with link to the directory containing\n # routed studies\n if len(sys.argv) != 2:\n print(\"You should supply one command line argument pointing to the routing folder. Exiting.\")\n sys.exit()\n\n # Find all subdirectories within the supplied directory. We assume that \n # one subdirectory contains a full study\n subdirs = [os.path.join(sys.argv[1], d) for d in os.listdir(sys.argv[1]) if\n os.path.isdir(os.path.join(sys.argv[1], d))]\n\n # Get the latest directory\n study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)[1]\n\n print(f\"Looking for series to run inference on in directory {study_dir}...\")\n\n # TASK: get_series_for_inference is not complete. Go and complete it\n volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))\n print(f\"Found series of {volume.shape[2]} axial slices\")\n\n print(\"HippoVolume.AI: Running inference...\")\n # TASK: Use the UNetInferenceAgent class and model parameter file from the previous section\n inference_agent = UNetInferenceAgent(\n device=\"cpu\",\n parameter_file_path=r\"/home/workspace/model/model.pth\")\n\n # Run inference\n # TASK: single_volume_inference_unpadded takes a volume of arbitrary size \n # and reshapes y and z dimensions to the patch size used by the model before \n # running inference. Your job is to implement it.\n pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume))\n # TASK: get_predicted_volumes is not complete. Go and complete it\n pred_volumes = get_predicted_volumes(pred_label)\n\n # Create and save the report\n print(\"Creating and pushing report...\")\n report_save_path = r\"/home/workspace/out/report.dcm\"\n # TASK: create_report is not complete. Go and complete it. 
\n    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your\n    # knowledge of DICOM format\n    report_img = create_report(pred_volumes, header, volume, pred_label)\n    save_report_as_dcm(header, report_img, report_save_path)\n\n    # Send report to our storage archive\n    # TASK: Write a command line string that will issue a DICOM C-STORE request to send our report\n    # to our Orthanc server (that runs on port 4242 of the local machine), using storescu tool\n    os_command(f\"storescu 127.0.0.1 4242 -v -aec HIPPOAI +r +sd {report_save_path}\")\n\n    # This line will remove the study dir if run as root user\n    # Sleep to let our StoreSCP server process the report (remember - in our setup\n    # the main archive is routing everything that is sent to it, including our freshly generated\n    # report) - we want to give it time to save before cleaning it up\n    time.sleep(2)\n    shutil.rmtree(study_dir, onerror=lambda f, p, e: print(f\"Error deleting: {e[1]}\"))\n\n    print(f\"Inference successful on {header['SOPInstanceUID'].value}, out: {pred_label.shape}\",\n          f\"volume ant: {pred_volumes['anterior']}, \",\n          f\"volume post: {pred_volumes['posterior']}, total volume: {pred_volumes['total']}\")\n" ]
[ [ "numpy.max", "numpy.array", "numpy.sum", "numpy.stack", "numpy.flip" ] ]
prasoongoyal/rl-learn
[ "0401ca8c60b11adbc6fddfda12c03876a45c2ad0" ]
[ "rl/environment.py" ]
[ "import gym\nimport sys\nimport random\nimport numpy as np\nfrom scipy.misc import imresize\nfrom utils import *\nfrom PIL import Image\nfrom copy import deepcopy\nimport tensorflow as tf\nfrom itertools import groupby\nimport pdb\nimport pickle\nimport torch\nsys.path.insert(0, 'learn/')\nfrom learn_model import LearnModel\nfrom tasks import *\n\nclass GymEnvironment(object):\n def __init__(self, args, gamma):\n self.args = args\n self.env = gym.make(ENV_NAME)\n\n self.dims = (SCREEN_WIDTH, SCREEN_HEIGHT)\n\n self._screen = None\n self.reward = 0\n self.terminal = True\n\n self.reset()\n if self.args.lang_coeff > 0:\n self.setup_language_network()\n self.gamma = gamma\n\n # aggregates to compute Spearman correlation coefficients\n self.action_vectors_list = []\n self.rewards_list = []\n\n def reset(self):\n self.n_steps = 0\n self.action_vector = np.zeros(N_ACTIONS)\n self.potentials_list = []\n\n def new_game(self, from_random_game=False):\n self._screen = self.env.reset()\n self._step(0)\n self.initial_frame = None\n return self.screen, 0, 0, self.terminal\n\n def new_random_game(self):\n self.new_game(True)\n for _ in xrange(random.randint(0, RANDOM_START - 1)):\n self._step(0)\n return self.screen, 0, 0, self.terminal\n\n def agent_pos(self):\n x, y = self.env.ale.getRAM()[42:44]\n return int(x), int(y)\n\n def skull_pos(self):\n return int(self.env.ale.getRAM()[47])\n\n def room(self):\n return int(self.env.ale.getRAM()[3])\n\n def has_key(self):\n return int(self.env.ale.getRAM()[101])\n\n def orb_collected(self):\n return int(self.env.ale.getRAM()[49])\n\n def save_state(self, filename):\n state = self.env.clone_full_state()\n np.save(filename, state)\n print ('File written : {}'.format(filename))\n\n def load_state(self, filename):\n state = np.load(filename)\n self.env.restore_full_state(state)\n self._step(0)\n\n def repeat_action(self, action, n=1):\n for _ in range(n):\n self._step(action)\n\n def inspect(self):\n screen = self.env.ale.getScreenRGB()\n img = Image.fromarray(screen.astype('uint8'))\n img.save('trajectory/'+str(self.n_steps)+'.png')\n if self.n_steps > 100:\n input('Done')\n\n def new_expt(self):\n if self.args.expt_id == 1:\n self.task = Task1(self)\n elif self.args.expt_id == 2:\n self.task = Task2(self)\n elif self.args.expt_id == 3:\n self.task = Task3(self)\n elif self.args.expt_id == 4:\n self.task = Task4(self)\n elif self.args.expt_id == 5:\n self.task = Task5(self)\n elif self.args.expt_id == 6:\n self.task = Task6(self)\n elif self.args.expt_id == 7:\n self.task = Task7(self)\n elif self.args.expt_id == 8:\n self.task = Task8(self)\n elif self.args.expt_id == 9:\n self.task = Task9(self)\n elif self.args.expt_id == 10:\n self.task = Task10(self)\n elif self.args.expt_id == 11:\n self.task = Task11(self)\n elif self.args.expt_id == 12:\n self.task = Task12(self)\n elif self.args.expt_id == 13:\n self.task = Task13(self)\n elif self.args.expt_id == 14:\n self.task = Task14(self)\n elif self.args.expt_id == 15:\n self.task = Task15(self)\n \n self._step(0)\n self._step(0)\n self._step(0)\n self._step(0)\n for _ in range(random.randint(0, RANDOM_START - 1)):\n self._step(0)\n\n return self.screen, 0, 0, self.terminal\n\n def _step(self, action):\n self._screen, self.reward, self.terminal, _ = self.env.step(action)\n self.n_steps += 1\n\n def _random_step(self):\n action = self.env.action_space.sample()\n self._step(action)\n\n @ property\n def screen(self):\n return imresize(rgb2gray(self._screen)/255., self.dims)\n\n @property\n def action_size(self):\n 
return self.env.action_space.n\n\n @property\n def lives(self):\n return self.env.ale.lives()\n\n @property\n def action_space(self):\n return self.env.action_space\n\n @property\n def state(self):\n return self.screen, self.reward, self.terminal\n\n def act(self, action):\n start_lives = self.lives\n self.terminal = False\n self.action_vector[action] += 1.\n\n self._step(action)\n\n if start_lives > self.lives:\n self.terminal = True\n \n if not self.terminal:\n goal_reached = self.task.finished()\n else:\n goal_reached = False\n\n if goal_reached:\n self.reward = 1.0\n self.terminal = True\n else:\n self.reward = 0.0\n\n if self.args.lang_coeff > 0.0:\n lang_reward = self.args.lang_coeff * self.compute_language_reward()\n self.reward += lang_reward\n if self.n_steps > MAX_STEPS:\n self.terminal = True\n \n if self.terminal:\n self.reset()\n\n return self.state, goal_reached\n\n def setup_language_network(self):\n self.lang_net_graph = tf.Graph()\n with self.lang_net_graph.as_default():\n self.lang_network = LearnModel('predict', None, self.args.model_dir)\n sentence_id = (self.args.expt_id-1) * 3 + (self.args.descr_id-1)\n lang_data = pickle.load(open('./data/test_lang_data.pkl', 'rb'), encoding='bytes')\n self.lang = lang_data[sentence_id][self.args.lang_enc]\n\n def compute_language_reward(self):\n if self.n_steps < 2:\n logits = None\n else:\n with self.lang_net_graph.as_default():\n logits = self.lang_network.predict([self.action_vector], [self.lang])[0]\n\n if logits is None:\n self.potentials_list.append(0.)\n else:\n e_x = np.exp(logits - np.max(logits))\n self.potentials_list.append(e_x[1] - e_x[0] + self.args.noise * np.random.normal())\n\n self.action_vectors_list.append(list(self.action_vector[k] for k in spearman_corr_coeff_actions))\n self.rewards_list.append(self.potentials_list[-1])\n\n if len(self.potentials_list) > 1:\n lang_result = (self.gamma * self.potentials_list[-1] - self.potentials_list[-2])\n return lang_result\n else:\n return 0.\n\n" ]
[ [ "numpy.max", "numpy.random.normal", "numpy.zeros", "tensorflow.Graph", "numpy.load", "numpy.save" ] ]
abhiyerasi/IDV_MaskRCNN_Image_Segmentation
[ "3a873df1f8b46f05131ff93a7e665c6c54b2ad2b" ]
[ "samples/ivddataset.py" ]
[ "import os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Dataset\n############################################################\n\nclass IDVDataset(utils.Dataset):\n\n def load_idv(self, dataset_dir):\n \"\"\"Load a subset of the Balloon dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. We have only three class to add.\n self.add_class(\"idv\", 1, \"dosa\")\n self.add_class(\"idv\", 2, \"idly\")\n self.add_class(\"idv\", 3, \"vada\")\n\n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. There are stores in the\n # shape_attributes (see json format above)\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n objects = [s['region_attributes'] for s in a['regions'].values()]\n\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n num_ids = [int(n['class']) for n in objects]\n # the image. 
This is only managable since the dataset is tiny.\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"idv\",\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,num_ids=num_ids)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a balloon dataset image, delegate to parent class.\n #image_info = self.image_info[image_id]\n info = self.image_info[image_id]\n if info[\"source\"] != \"idv\":\n return super(self.__class__, self).load_mask(image_id)\n num_ids = info['num_ids']\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n #info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n num_ids = np.array(num_ids, dtype=np.int32)\n return mask, num_ids\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"idv\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n" ]
[ [ "numpy.array" ] ]
ehgp/data_606_capstone
[ "c4b6fef75471b95ec56689bbcaa2a791d01472c3" ]
[ "trade.py" ]
[ "\"\"\"Trade.\n\nThe module will execute the arbitrage opportunities for you as it finds them.\nCurrently two arbitrage models are being used:\n1.Triangular Arbitrage.\n2.Bellman Ford Optimization.\n\"\"\"\nimport os\nimport yaml\nimport pandas as pd\nfrom kucoin.client import Trade\nimport sqlite3\nimport logging\nimport logging.config\nfrom pathlib import Path\nimport datetime as dt\n\n# pandas controls on how much data to see\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\npd.set_option(\"display.width\", None)\npd.set_option(\"display.max_colwidth\", None)\n\n# Logging\npath = Path(os.getcwd())\nPath(\"log\").mkdir(parents=True, exist_ok=True)\nlog_config = Path(path, \"log_config.yaml\")\ntimestamp = \"{:%Y_%m_%d_%H_%M_%S}\".format(dt.datetime.now())\nwith open(log_config, \"r\") as log_file:\n config_dict = yaml.safe_load(log_file.read())\n # Append date stamp to the file name\n log_filename = config_dict[\"handlers\"][\"file\"][\"filename\"]\n base, extension = os.path.splitext(log_filename)\n base2 = \"_\" + os.path.splitext(os.path.basename(__file__))[0] + \"_\"\n log_filename = \"{}{}{}{}\".format(base, base2, timestamp, extension)\n config_dict[\"handlers\"][\"file\"][\"filename\"] = log_filename\n logging.config.dictConfig(config_dict)\nlogger = logging.getLogger(__name__)\n\n\ndef _load_config() -> dict:\n \"\"\"Load the configuration yaml and return dictionary of setttings.\n\n Returns:\n yaml as a dictionary.\n \"\"\"\n config_path = os.path.dirname(os.path.realpath(__file__))\n config_path = os.path.join(config_path, \"parameters.yaml\")\n with open(config_path, \"r\") as config_file:\n config_defs = yaml.safe_load(config_file.read())\n\n if config_defs.values() is None:\n raise ValueError(\"parameters yaml file incomplete\")\n\n return config_defs\n\n\ndef order_handling(order: dict) -> bool:\n \"\"\"Check if order went through.\n\n Requires analysis.py to be running in real time for trade info collection into DB.\n\n Args:\n order: order dictionary containing order ID.\n\n Returns:\n boolean: Shows if order is has been executed in exchange or not.\n \"\"\"\n table = \"trade_info\"\n con = sqlite3.connect(\"db/kucoin.db\")\n df = pd.read_sql_query(\"SELECT * FROM %s\" % (table), con)\n status_order = df[df[\"orderId\"] == order[\"orderId\"]][\"type\"]\n if status_order is None:\n order_handling(order)\n elif status_order == \"filled\":\n return True\n elif status_order == \"cancelled\":\n return False\n else:\n order_handling(order)\n\n\ndef execute_fwd_tri_arbitrage(client: Trade, row: pd.DataFrame, cost: float) -> bool:\n \"\"\"Execute Forward Triangle Arbitrage.\n\n Using the amount that you are willing to initially spend in fiat per triangular trade (3 trades),\n execute all trades one after the other immediately and check for their success.\n If the trade is not successful, discard trade completely.\n Time in force set as 'Fill or Kill' to force trade to be filled, no partial filled trades allowed.\n\n Args:\n client: Trade module for kucoin.client.\n row: Dataframe containing pricing and volume information for tickers.\n cost: initial trade cost.\n\n Returns:\n boolean: Shows if trade successful or not.\n \"\"\"\n size = cost / row[\"ca_bsta\"] # bc_bsta,ba_bstb\n if size < row[\"ca_bstasize\"]:\n order_ca = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"c\"], row[\"a\"]),\n side=\"buy\",\n price=str(row[\"ca_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n 
time_in_force=\"FOK\",\n )\n else:\n size = row[\"ca_bstasize\"]\n order_ca = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"c\"], row[\"a\"]),\n side=\"buy\",\n price=str(row[\"ca_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_ca) is True:\n size = (row[\"ca_bsta\"] * size) / row[\"bc_bsta\"] # bc_bsta,ba_bstb\n if size < row[\"bc_bstasize\"]:\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"c\"]),\n side=\"buy\",\n price=str(row[\"bc_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n else:\n size = row[\"bc_bstasize\"]\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"c\"]),\n side=\"buy\",\n price=str(row[\"bc_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_bc) is True:\n size = (row[\"bc_bsta\"] * size) / row[\"ba_bstb\"] # bc_bsta,ba_bstb\n if size < row[\"ba_bstbsize\"]:\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"a\"]),\n side=\"sell\",\n price=str(row[\"ba_bstb\"]),\n size=size,\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n else:\n size = row[\"ba_bstbsize\"]\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"a\"]),\n side=\"sell\",\n price=str(row[\"ba_bstb\"]),\n size=size,\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_bc) is True:\n logger.info(\"Forward Triangle Arbitrage successful\")\n return True\n else:\n logger.info(\n \"Forward Triangle Arbitrage Order 3 not filled successfully, cancelling arbitrage op\"\n )\n return False\n else:\n logger.info(\n \"Forward Triangle Arbitrage Order 2 not filled successfully, cancelling arbitrage op\"\n )\n return False\n else:\n logger.info(\n \"Forward Triangle Arbitrage Order 1 not filled successfully, cancelling arbitrage op\"\n )\n return False\n\n\ndef execute_rev_tri_arbitrage(client: Trade, row: pd.DataFrame, cost: float) -> bool:\n \"\"\"Execute Reverse Triangle Arbitrage.\n\n Using the amount that you are willing to initially spend in fiat per triangular trade (3 trades),\n execute all trades one after the other immediately and check for their success.\n If the trade is not successful, discard trade completely.\n Time in force set as 'Fill or Kill' to force trade to be filled, no partial filled trades allowed.\n\n Args:\n client: Trade module for kucoin.client.\n row: Dataframe containing pricing and volume information for tickers.\n cost: initial trade cost.\n\n Returns:\n boolean: Shows if trade successful or not.\n \"\"\"\n size = cost / row[\"ba_bsta\"] # bc_bstb,ca_bstb\n if size < row[\"ba_bstasize\"]:\n order_ca = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"a\"]),\n side=\"buy\",\n price=str(row[\"ba_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n else:\n size = row[\"ba_bstasize\"]\n order_ca = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"a\"]),\n side=\"buy\",\n price=str(row[\"ba_bsta\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_ca) is True:\n size = (row[\"ba_bsta\"] * size) / row[\"bc_bstb\"] # bc_bstb,ca_bstb\n if size < 
row[\"bc_bstbsize\"]:\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"c\"]),\n side=\"sell\",\n price=str(row[\"bc_bstb\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n else:\n size = row[\"bc_bstbsize\"]\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"b\"], row[\"c\"]),\n side=\"sell\",\n price=str(row[\"bc_bstb\"]),\n size=str(size),\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_bc) is True:\n size = (row[\"bc_bstb\"] * size) / row[\"ca_bstb\"] # bc_bstb,ca_bstb\n if size < row[\"ca_bstbsize\"]:\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"c\"], row[\"a\"]),\n side=\"sell\",\n price=str(row[\"ca_bstb\"]),\n size=size,\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n else:\n size = row[\"ca_bstbsize\"]\n order_bc = client.create_limit_order(\n symbol=\"%s-%s\" % (row[\"c\"], row[\"a\"]),\n side=\"sell\",\n price=str(row[\"ca_bstb\"]),\n size=size,\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order_bc) is True:\n logger.info(\"Reverse Triangle Arbitrage successful\")\n return True\n else:\n logger.info(\n \"Reverse Triangle Arbitrage Order 3 not filled successfully, cancelling arbitrage op\"\n )\n return False\n else:\n logger.info(\n \"Reverse Triangle Arbitrage Order 2 not filled successfully, cancelling arbitrage op\"\n )\n return False\n else:\n logger.info(\n \"Reverse Triangle Arbitrage Order 1 not filled successfully, cancelling arbitrage op\"\n )\n return False\n\n\ndef execute_triangular_arbitrage():\n \"\"\"Execute Triangular Arbitrage.\"\"\"\n cf = _load_config()\n cost = cf[\"fiat_cost_per_trade\"]\n api_key = cf[\"KUCOIN_YOUR_API_KEY\"]\n api_secret = cf[\"KUCOIN_YOUR_SECRET\"]\n api_passphrase = cf[\"KUCOIN_YOUR_PASS\"]\n client = Trade(\n key=api_key, secret=api_secret, passphrase=api_passphrase, is_sandbox=True\n )\n table = \"tri_arb_ops\"\n con = sqlite3.connect(\"db/kucoin.db\")\n cur = con.cursor()\n df = pd.read_sql_query(\"SELECT * FROM %s\" % (table), con)\n df = df.astype(\n {\n \"a\": \"str\",\n \"b\": \"str\",\n \"c\": \"str\",\n \"ba_bstb\": \"float\",\n \"ba_bsta\": \"float\",\n \"ba_bstbsize\": \"float\",\n \"ba_bstasize\": \"float\",\n \"bc_bstb\": \"float\",\n \"bc_bsta\": \"float\",\n \"bc_bstbsize\": \"float\",\n \"bc_bstasize\": \"float\",\n \"ca_bstb\": \"float\",\n \"ca_bsta\": \"float\",\n \"ca_bstbsize\": \"float\",\n \"ca_bstasize\": \"float\",\n \"fwd_arb\": \"float\",\n \"rev_arb\": \"float\",\n \"attempted\": \"str\",\n }\n )\n df = df[df[\"attempted\"] == \"N\"]\n df.fillna(0, inplace=True)\n for i, row in df.iterrows():\n logger.info(\n \"Execute triangular arbitrage for trio: %s,%s,%s\"\n % (row[\"a\"], row[\"b\"], row[\"c\"])\n )\n if row[\"fwd_arb\"] > row[\"rev_arb\"]:\n execute_fwd_tri_arbitrage(client, row, cost)\n update_table = \"UPDATE %s SET %s = %s WHERE %s = %s;\" % (\n table,\n \"attempted\",\n \"Y\",\n \"fwd_arb\",\n row[\"fwd_arb\"],\n )\n logger.info(\"Updating a row of data\")\n cur.execute(update_table)\n con.commit()\n con.close()\n elif row[\"rev_arb\"] > row[\"fwd_arb\"]:\n execute_rev_tri_arbitrage(client, row, cost)\n update_table = \"UPDATE %s SET %s = %s WHERE %s = %s;\" % (\n table,\n \"attempted\",\n \"Y\",\n \"rev_arb\",\n row[\"rev_arb\"],\n )\n logger.info(\"Updating a row of data\")\n cur.execute(update_table)\n 
con.commit()\n con.close()\n else:\n logger.info(\n \"Opportunities yield same profit percentage, attempting fwd instead\"\n )\n execute_fwd_tri_arbitrage(client, row, cost)\n\n\ndef execute_bellman_ford(client: Trade) -> bool:\n \"\"\"Execute Bellman Ford Trading Opportunity.\"\"\"\n con = sqlite3.connect(\"db/kucoin.db\")\n cur = con.cursor()\n bf_profit_query = pd.read_sql_query(\"select * from bf_arb_ops\", con=con)\n bf_profit_query = bf_profit_query[bf_profit_query[\"attempted\"] == \"N\"]\n for idx, row in bf_profit_query.iterrows():\n path = row[\"path\"].strip(\"][\").split(\", \")\n trade_type = row[\"trade_type\"].strip(\"][\").split(\", \")\n sizes = row[\"sizes\"].strip(\"][\").split(\", \")\n rates = row[\"rates\"].strip(\"][\").split(\", \")\n for i in range(len(path)):\n order = client.create_limit_order(\n symbol=\"%s\" % (path[i]),\n side=trade_type[i],\n price=rates[i],\n size=sizes[i],\n remark=\"test\",\n stp=\"CN\",\n trade_type=\"TRADE\",\n time_in_force=\"FOK\",\n )\n if order_handling(order) is True:\n logger.info(\n \"Bellman Ford arbitrage order %d successful for %s\" % (i, path[i])\n )\n continue\n else:\n logger.info(\n \"Bellman Ford arbitrage order %d not filled successfully for %s, cancelling arbitrage op\"\n % (i, path[i])\n )\n break\n table = \"bf_arb_ops\"\n update_table = \"UPDATE %s SET %s = %s WHERE %s = %s;\" % (\n table,\n \"attempted\",\n \"Y\",\n \"path\",\n row[\"path\"],\n )\n logger.info(\"Updating a row of data\")\n cur.execute(update_table)\n con.commit()\n con.close()\n\n\nif __name__ == \"__main__\":\n # execute_triangular_arbitrage()\n execute_bellman_ford()\n" ]
[ [ "pandas.set_option", "pandas.read_sql_query" ] ]
zhouyuanmin/MyDemo
[ "664977a6243992c77931e58b98f5262745759d1a" ]
[ "2020-12/demo7.py" ]
[ "import cv2\nimport numpy as np\nfrom numpy import ndarray\n\n\ndef face_update(url: str):\n img = cv2.imread(url)\n return img\n\n\ndef face_set(img, scale_factor=1.3, min_neighbors=3, min_size=(32, 32)):\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n faces = face_cascade.detectMultiScale(\n img,\n scaleFactor=scale_factor,\n minNeighbors=min_neighbors,\n minSize=min_size\n )\n return faces\n\n\ndef face_draw(img, faces, shape='square'):\n if shape == 'square': # 可以为方形\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n else: # 可以为圆形,还没实现\n pass\n\n\ndef face_show(img):\n # print('调用face_show函数')\n # cv2.imshow('win_name', img)\n cv2.imwrite('win_name40.png', img)\n\n\n# AI美颜\ndef change_darker(img: ndarray, value: int = 10):\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n if value > 3:\n img_hsv[:, :, 1] = np.log(img_hsv[:, :, 1] / 255 * (value - 1) + 1) / np.log(value + 1) * 255\n if value < 0:\n img_hsv[:, :, 1] = np.uint8(img_hsv[:, :, 1] / np.log(- value + np.e))\n return cv2.cvtColor(img_hsv, cv2.COLOR_HLS2BGR)\n\n\n# def beauty_face(url: str):\n# img = cv2.imdecode(np.fromfile(url, dtype=np.uint8), -1)\n# img_w = change_darker(img=img)\n# cv2.imshow('win_name', img_w)\n\n\ndef beauty_level(img: ndarray, level: int = 0):\n img.tmp_beauty_level = level\n return img\n\n\ndef beauty_face(img: ndarray):\n if hasattr(img, 'tmp_beauty_level'):\n value = img.tmp_beauty_level\n else:\n value = 0\n return change_darker(img, value=value)\n\n\nif __name__ == '__main__':\n img = face_update(\n \"/Users/zhou/Desktop/222.jpg\")\n print(type(img))\n # faces = beauty_level(img, 100)\n # beauty_face(img)\n img = change_darker(img)\n print(type(img))\n face_show(img)\n" ]
[ [ "numpy.log" ] ]
ingstra/pennylane
[ "0d416a62728f922f1d261d5000504a0c19765ee5" ]
[ "pennylane/transforms/classical_jacobian.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nContains the classical Jacobian transform.\r\n\"\"\"\r\n# pylint: disable=import-outside-toplevel\r\nimport pennylane as qml\r\nfrom pennylane import numpy as np\r\n\r\n\r\ndef classical_jacobian(qnode, argnum=None):\r\n r\"\"\"Returns a function to extract the Jacobian\r\n matrix of the classical part of a QNode.\r\n\r\n This transform allows the classical dependence between the QNode\r\n arguments and the quantum gate arguments to be extracted.\r\n\r\n Args:\r\n qnode (pennylane.QNode): QNode to compute the (classical) Jacobian of\r\n argnum (int or Sequence[int]): indices of QNode arguments with respect to which\r\n the (classical) Jacobian is computed\r\n\r\n Returns:\r\n function: Function which accepts the same arguments as the QNode.\r\n When called, this function will return the Jacobian of the QNode\r\n gate arguments with respect to the QNode arguments indexed by ``argnum``.\r\n\r\n **Example**\r\n\r\n Consider the following QNode:\r\n\r\n >>> @qml.qnode(dev)\r\n ... def circuit(weights):\r\n ... qml.RX(weights[0], wires=0)\r\n ... qml.RY(0.2 * weights[0], wires=1)\r\n ... qml.RY(2.5, wires=0)\r\n ... qml.RZ(weights[1] ** 2, wires=1)\r\n ... qml.RX(weights[2], wires=1)\r\n ... return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\r\n\r\n We can use this transform to extract the relationship :math:`f: \\mathbb{R}^n \\rightarrow\r\n \\mathbb{R}^m` between the input QNode arguments :math:`w` and the gate arguments :math:`g`, for\r\n a given value of the QNode arguments:\r\n\r\n >>> cjac_fn = qml.transforms.classical_jacobian(circuit)\r\n >>> weights = np.array([1., 1., 0.6], requires_grad=True)\r\n >>> cjac = cjac_fn(weights)\r\n >>> print(cjac)\r\n [[1. 0. 0. ]\r\n [0.2 0. 0. ]\r\n [0. 0. 0. ]\r\n [0. 1.2 0. ]\r\n [0. 0. 1. ]]\r\n\r\n The returned Jacobian has rows corresponding to gate arguments, and columns\r\n corresponding to QNode arguments; that is,\r\n\r\n .. math:: J_{ij} = \\frac{\\partial}{\\partial g_i} f(w_j).\r\n\r\n We can see that:\r\n\r\n - The zeroth element of ``weights`` is repeated on the first two gates generated by the QNode.\r\n\r\n - The third row consisting of all zeros indicates that the third gate ``RY(2.5)`` does not\r\n depend on the ``weights``.\r\n\r\n - The quadratic dependence of the fourth gate argument yields :math:`2\\cdot 0.6=1.2`.\r\n\r\n .. note::\r\n\r\n The QNode is constructed during this operation.\r\n\r\n For a QNode with multiple QNode arguments, the arguments with respect to which the\r\n Jacobian is computed can be controlled with the ``argnum`` keyword argument.\r\n The output for ``argnum=None`` depends on the backend:\r\n\r\n .. 
list-table:: Output format of ``classical_jacobian``\r\n :widths: 25 25 25 25\r\n :header-rows: 1\r\n\r\n * - Interface\r\n - ``argnum=None``\r\n - ``type(argnum)=int``\r\n - ``argnum=Sequence[int]``\r\n * - ``'autograd'``\r\n - ``tuple(arrays)`` [1]\r\n - ``array``\r\n - ``tuple(array)``\r\n * - ``'jax'``\r\n - ``array``\r\n - ``array``\r\n - ``tuple(array)``\r\n * - ``'tf'``\r\n - ``tuple(arrays)``\r\n - ``array``\r\n - ``tuple(array)``\r\n * - ``'torch'``\r\n - ``tuple(arrays)``\r\n - ``array``\r\n - ``tuple(array)``\r\n\r\n [1] If all QNode argument are scalars, the tuple is unpacked and the one-dimensional Jacobian\r\n arrays are stacked into one ``array``. If there only is one QNode argument, the tuple is\r\n unpacked as well. Both is due to the behaviour of ``qml.jacobian``.\r\n\r\n **Example with ``argnum``**\r\n\r\n >>> @qml.qnode(dev)\r\n ... def circuit(x, y, z):\r\n ... qml.RX(qml.math.sin(x), wires=0)\r\n ... qml.CNOT(wires=[0, 1])\r\n ... qml.RY(y ** 2, wires=1)\r\n ... qml.RZ(1 / z, wires=1)\r\n ... return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\r\n >>> jac_fn = qml.transforms.classical_jacobian(circuit, argnum=[1, 2])\r\n >>> x, y, z = np.array([0.1, -2.5, 0.71])\r\n >>> jac_fn(x, y, z)\r\n (array([-0., -5., -0.]), array([-0. , -0. , -1.98373339]))\r\n\r\n Only the Jacobians with respect to the arguments ``x`` and ``y`` were computed, and\r\n returned as a tuple of ``arrays``.\r\n\r\n \"\"\"\r\n\r\n def classical_preprocessing(*args, **kwargs):\r\n \"\"\"Returns the trainable gate parameters for a given QNode input.\"\"\"\r\n trainable_only = kwargs.pop(\"_trainable_only\", True)\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters(trainable_only=trainable_only))\r\n\r\n if qnode.interface == \"autograd\":\r\n\r\n def _jacobian(*args, **kwargs):\r\n if argnum is None:\r\n jac = qml.jacobian(classical_preprocessing)(*args, **kwargs)\r\n elif np.isscalar(argnum):\r\n jac = qml.jacobian(classical_preprocessing, argnum=argnum)(*args, **kwargs)\r\n else:\r\n jac = tuple(\r\n (\r\n qml.jacobian(classical_preprocessing, argnum=i)(*args, **kwargs)\r\n for i in argnum\r\n )\r\n )\r\n return jac\r\n\r\n return _jacobian\r\n\r\n if qnode.interface == \"torch\":\r\n import torch\r\n\r\n def _jacobian(*args, **kwargs): # pylint: disable=unused-argument\r\n jac = torch.autograd.functional.jacobian(classical_preprocessing, args)\r\n if argnum is not None:\r\n if np.isscalar(argnum):\r\n jac = jac[argnum]\r\n else:\r\n jac = tuple((jac[idx] for idx in argnum))\r\n return jac\r\n\r\n return _jacobian\r\n\r\n if qnode.interface == \"jax\":\r\n import jax\r\n\r\n argnum = 0 if argnum is None else argnum\r\n\r\n def _jacobian(*args, **kwargs):\r\n kwargs[\"_trainable_only\"] = False\r\n return jax.jacobian(classical_preprocessing, argnums=argnum)(*args, **kwargs)\r\n\r\n return _jacobian\r\n\r\n if qnode.interface == \"tf\":\r\n import tensorflow as tf\r\n\r\n def _jacobian(*args, **kwargs):\r\n if np.isscalar(argnum):\r\n sub_args = args[argnum]\r\n elif argnum is None:\r\n sub_args = args\r\n else:\r\n sub_args = tuple((args[i] for i in argnum))\r\n\r\n with tf.GradientTape() as tape:\r\n gate_params = classical_preprocessing(*args, **kwargs)\r\n\r\n jac = tape.jacobian(gate_params, sub_args)\r\n return jac\r\n\r\n return _jacobian\r\n" ]
[ [ "tensorflow.GradientTape", "torch.autograd.functional.jacobian" ] ]
BorisPolonsky/KdConv
[ "fcee15bf8d4f6b0a80ffe01a00739755a7f44e0a" ]
[ "benchmark/memseq2seq/model.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport time\n\nfrom tensorflow.python.ops.nn import dynamic_rnn\nfrom utils.output_projection import output_projection_layer, MyDense, MyAttention, MyInferenceHelper\nfrom utils import SummaryHelper\nimport jieba\nimport os\nimport json\n\n\nclass Seq2SeqModel(object):\n\tdef __init__(self, data, args, embed):\n\n\t\tself.posts = tf.placeholder(tf.int32, (None, None), 'enc_inps') # batch*len\n\t\tself.posts_length = tf.placeholder(tf.int32, (None,), 'enc_lens') # batch\n\t\tself.prevs_length = tf.placeholder(tf.int32, (None,), 'enc_lens_prev') # batch\n\t\tself.origin_responses = tf.placeholder(tf.int32, (None, None), 'dec_inps') # batch*len\n\t\tself.origin_responses_length = tf.placeholder(tf.int32, (None,), 'dec_lens') # batch\n\n\t\tself.kgs = tf.placeholder(tf.int32, (None, None, None), 'kg_inps') # batch*len\n\t\tself.kgs_h_length = tf.placeholder(tf.int32, (None, None), 'kg_h_lens') # batch\n\t\tself.kgs_hr_length = tf.placeholder(tf.int32, (None, None), 'kg_hr_lens') # batch\n\t\tself.kgs_hrt_length = tf.placeholder(tf.int32, (None, None), 'kg_hrt_lens') # batch\n\t\tself.kgs_index = tf.placeholder(tf.float32, (None, None), 'kg_indices') # batch\n\n\t\tself.is_train = tf.placeholder(tf.bool)\n\n\t\t# deal with original data to adapt encoder and decoder\n\t\tbatch_size, decoder_len = tf.shape(self.origin_responses)[0], tf.shape(self.origin_responses)[1]\n\t\tself.responses = tf.split(self.origin_responses, [1, decoder_len-1], 1)[1] # no go_id\n\t\tself.responses_length = self.origin_responses_length - 1\n\t\tself.responses_input = tf.split(self.origin_responses, [decoder_len-1, 1], 1)[0] # no eos_id\n\t\tself.responses_target = self.responses\n\t\tdecoder_len = decoder_len - 1\n\t\tself.posts_input = self.posts # batch*len\n\t\tself.decoder_mask = tf.reshape(tf.cumsum(tf.one_hot(self.responses_length-1,\n\t\t\tdecoder_len), reverse=True, axis=1), [-1, decoder_len])\n\t\tkg_len = tf.shape(self.kgs)[2]\n\t\t#kg_len = tf.Print(kg_len, [batch_size, kg_len, decoder_len, self.kgs_length])\n\t\tkg_h_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_h_length-1,\n\t\t\tkg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])\n\t\tkg_hr_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_hr_length-1,\n\t\t\tkg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])\n\t\tkg_hrt_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_hrt_length-1,\n\t\t\tkg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])\n\t\tkg_key_mask = kg_hr_mask\n\t\tkg_value_mask = kg_hrt_mask - kg_hr_mask\n\n\t\t# initialize the training process\n\t\tself.learning_rate = tf.Variable(float(args.lr), trainable=False, dtype=tf.float32)\n\t\tself.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * args.lr_decay)\n\t\tself.global_step = tf.Variable(0, trainable=False)\n\n\t\t# build the embedding table and embedding input\n\t\tif embed is None:\n\t\t\t# initialize the embedding randomly\n\t\t\tself.embed = tf.get_variable('embed', [data.vocab_size, args.embedding_size], tf.float32)\n\t\telse:\n\t\t\t# initialize the embedding by pre-trained word vectors\n\t\t\tself.embed = tf.get_variable('embed', dtype=tf.float32, initializer=embed)\n\n\t\tself.encoder_input = tf.nn.embedding_lookup(self.embed, self.posts)\n\t\tself.decoder_input = tf.nn.embedding_lookup(self.embed, self.responses_input)\n\t\tself.kg_input = tf.nn.embedding_lookup(self.embed, self.kgs)\n\t\t#self.encoder_input = tf.cond(self.is_train,\n\t\t#\t\t\t\t\t\t\t lambda: 
tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.posts_input), 0.8),\n\t\t#\t\t\t\t\t\t\t lambda: tf.nn.embedding_lookup(self.embed, self.posts_input)) #batch*len*unit\n\t\t#self.decoder_input = tf.cond(self.is_train,\n\t\t#\t\t\t\t\t\t\t lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.responses_input), 0.8),\n\t\t#\t\t\t\t\t\t\t lambda: tf.nn.embedding_lookup(self.embed, self.responses_input))\n\n\t\t# build rnn_cell\n\t\tcell_enc = tf.nn.rnn_cell.GRUCell(args.eh_size)\n\t\tcell_dec = tf.nn.rnn_cell.GRUCell(args.dh_size)\n\n\t\t# build encoder\n\t\twith tf.variable_scope('encoder'):\n\t\t\tencoder_output, encoder_state = dynamic_rnn(cell_enc, self.encoder_input,\n\t\t\t\tself.posts_length, dtype=tf.float32, scope=\"encoder_rnn\")\n\n\t\tself.kg_key_avg = tf.reduce_sum(self.kg_input * kg_key_mask, axis=2) / tf.maximum(tf.reduce_sum(kg_key_mask, axis=2), tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1), dtype=tf.float32))\n\t\tself.kg_value_avg = tf.reduce_sum(self.kg_input * kg_value_mask, axis=2) / tf.maximum(tf.reduce_sum(kg_value_mask, axis=2), tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1), dtype=tf.float32))\n\t\twith tf.variable_scope('knowledge'):\n\t\t\tquery = tf.reshape(tf.layers.dense(tf.concat(encoder_state, axis=-1), args.embedding_size, use_bias=False), [batch_size, 1, args.embedding_size])\n\t\tkg_score = tf.reduce_sum(query * self.kg_key_avg, axis=2)\n\t\tkg_score = tf.where(tf.greater(self.kgs_hrt_length, 0), kg_score, - tf.ones_like(kg_score) * np.inf)\n\t\tkg_alignment = tf.nn.softmax(kg_score)\n\n\t\tkg_max = tf.argmax(kg_alignment, axis=-1)\n\t\tkg_max_onehot = tf.one_hot(kg_max, tf.shape(kg_alignment)[1], dtype=tf.float32)\n\t\tself.kg_acc = tf.reduce_sum(kg_max_onehot * self.kgs_index) / tf.maximum(\n\t\t\ttf.reduce_sum(tf.reduce_max(self.kgs_index, axis=-1)), tf.constant(1.0))\n\t\tself.kg_loss = tf.reduce_sum(- tf.log(tf.clip_by_value(kg_alignment, 1e-12, 1.0)) * self.kgs_index, axis=1) / tf.maximum(\n\t\t\ttf.reduce_sum(self.kgs_index, axis=1), tf.ones([batch_size], dtype=tf.float32))\n\t\tself.kg_loss = tf.reduce_mean(self.kg_loss)\n\n\t\tself.knowledge_embed = tf.reduce_sum(tf.expand_dims(kg_alignment, axis=-1) * self.kg_value_avg, axis=1)\n\t\tknowledge_embed_extend = tf.tile(tf.expand_dims(self.knowledge_embed, axis=1), [1, decoder_len, 1])\n\t\tself.decoder_input = tf.concat([self.decoder_input, knowledge_embed_extend], axis=2)\n\n\t\t# get output projection function\n\t\toutput_fn = MyDense(data.vocab_size, use_bias = True)\n\t\tsampled_sequence_loss = output_projection_layer(args.dh_size, data.vocab_size, args.softmax_samples)\n\n\t\tencoder_len = tf.shape(encoder_output)[1]\n\t\tposts_mask = tf.sequence_mask(self.posts_length, encoder_len)\n\t\tprevs_mask = tf.sequence_mask(self.prevs_length, encoder_len)\n\t\tattention_mask = tf.reshape(tf.logical_xor(posts_mask, prevs_mask), [batch_size, encoder_len])\n\n\t\t# construct helper and attention\n\t\ttrain_helper = tf.contrib.seq2seq.TrainingHelper(self.decoder_input, self.responses_length)\n\t\t#infer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(self.embed, tf.fill([batch_size], data.go_id), data.eos_id)\n\t\tinfer_helper = MyInferenceHelper(self.embed, tf.fill([batch_size], data.go_id), data.eos_id, self.knowledge_embed)\n\t\t#attn_mechanism = tf.contrib.seq2seq.BahdanauAttention(args.dh_size, encoder_output,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t # memory_sequence_length=self.posts_length)\n\t\tattn_mechanism = MyAttention(args.dh_size, encoder_output, 
attention_mask)\n\t\tcell_dec_attn = tf.contrib.seq2seq.AttentionWrapper(cell_dec, attn_mechanism,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tattention_layer_size=args.dh_size)\n\t\tenc_state_shaping = tf.layers.dense(encoder_state, args.dh_size, activation = None)\n\t\tdec_start = cell_dec_attn.zero_state(batch_size, dtype = tf.float32).clone(cell_state = enc_state_shaping)\n\n\t\t# build decoder (train)\n\t\twith tf.variable_scope('decoder'):\n\t\t\tdecoder_train = tf.contrib.seq2seq.BasicDecoder(cell_dec_attn, train_helper, dec_start)\n\t\t\ttrain_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder_train, impute_finished = True, scope = \"decoder_rnn\")\n\t\t\tself.decoder_output = train_outputs.rnn_output\n\t\t\t#self.decoder_output = tf.nn.dropout(self.decoder_output, 0.8)\n\t\t\tself.decoder_distribution_teacher, self.decoder_loss, self.decoder_all_loss = \\\n\t\t\t\tsampled_sequence_loss(self.decoder_output, self.responses_target, self.decoder_mask)\n\n\t\t# build decoder (test)\n\t\twith tf.variable_scope('decoder', reuse=True):\n\t\t\tdecoder_infer = tf.contrib.seq2seq.BasicDecoder(cell_dec_attn, infer_helper, dec_start, output_layer = output_fn)\n\t\t\tinfer_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder_infer, impute_finished = True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t maximum_iterations=args.max_sent_length, scope = \"decoder_rnn\")\n\t\t\tself.decoder_distribution = infer_outputs.rnn_output\n\t\t\tself.generation_index = tf.argmax(tf.split(self.decoder_distribution,\n\t\t\t\t[2, data.vocab_size-2], 2)[1], 2) + 2 # for removing UNK\n\n\t\t# calculate the gradient of parameters and update\n\t\tself.params = [k for k in tf.trainable_variables() if args.name in k.name]\n\t\topt = tf.train.AdamOptimizer(self.learning_rate)\n\t\tself.loss = self.decoder_loss + self.kg_loss\n\t\tgradients = tf.gradients(self.loss, self.params)\n\t\tclipped_gradients, self.gradient_norm = tf.clip_by_global_norm(gradients, \n\t\t\t\targs.grad_clip)\n\t\tself.update = opt.apply_gradients(zip(clipped_gradients, self.params), \n\t\t\t\tglobal_step=self.global_step)\n\n\t\t# save checkpoint\n\t\tself.latest_saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,\n\t\t\t\tmax_to_keep=args.checkpoint_max_to_keep, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)\n\t\tself.best_saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,\n\t\t\t\tmax_to_keep=1, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)\n\n\t\t# create summary for tensorboard\n\t\tself.create_summary(args)\n\n\tdef store_checkpoint(self, sess, path, key, name):\n\t\tif key == \"latest\":\n\t\t\tself.latest_saver.save(sess, path, global_step = self.global_step, latest_filename = name)\n\t\telse:\n\t\t\tself.best_saver.save(sess, path, global_step = self.global_step, latest_filename = name)\n\t\t\t#self.best_global_step = self.global_step\n\n\tdef create_summary(self, args):\n\t\tself.summaryHelper = SummaryHelper(\"%s/%s_%s\" % \\\n\t\t\t\t(args.log_dir, args.name, time.strftime(\"%H%M%S\", time.localtime())), args)\n\n\t\tself.trainSummary = self.summaryHelper.addGroup(scalar=[\"loss\", \"perplexity\"], prefix=\"train\")\n\n\t\tscalarlist = [\"loss\", \"perplexity\"]\n\t\ttensorlist = []\n\t\ttextlist = []\n\t\tfor i in args.show_sample:\n\t\t\ttextlist.append(\"show_str%d\" % i)\n\t\tself.devSummary = self.summaryHelper.addGroup(scalar=scalarlist, tensor=tensorlist, text=textlist,\n\t\t\t\t\t\t\t\t\t\t\t\t\t prefix=\"dev\")\n\t\tself.testSummary = self.summaryHelper.addGroup(scalar=scalarlist, tensor=tensorlist, 
text=textlist,\n\t\t\t\t\t\t\t\t\t\t\t\t\t prefix=\"test\")\n\n\n\tdef print_parameters(self):\n\t\tfor item in self.params:\n\t\t\tprint('%s: %s' % (item.name, item.get_shape()))\n\t\n\tdef step_decoder(self, session, data, forward_only=False):\n\t\tinput_feed = {\n\t\t\t\tself.posts: data['post'], \n\t\t\t\tself.posts_length: data['post_length'], \n\t\t\t\tself.prevs_length: data['prev_length'],\n\t\t\t\tself.origin_responses: data['resp'], \n\t\t\t\tself.origin_responses_length: data['resp_length'],\n\t\t\t\tself.kgs: data['kg'],\n\t\t\t\tself.kgs_h_length: data['kg_h_length'],\n\t\t\t\tself.kgs_hr_length: data['kg_hr_length'],\n\t\t\t\tself.kgs_hrt_length: data['kg_hrt_length'],\n\t\t\t\tself.kgs_index: data['kg_index'],\n\t\t\t\tself.is_train: True\n\t\t\t\t}\n\t\tif forward_only:\n\t\t\toutput_feed = [self.decoder_loss, self.decoder_distribution_teacher, self.kg_loss, self.kg_acc]\n\t\telse:\n\t\t\toutput_feed = [self.decoder_loss, self.gradient_norm, self.update, self.kg_loss, self.kg_acc]\n\t\treturn session.run(output_feed, input_feed)\n\n\tdef inference(self, session, data):\n\t\tinput_feed = {\n\t\t\t\tself.posts: data['post'], \n\t\t\t\tself.posts_length: data['post_length'], \n\t\t\t\tself.prevs_length: data['prev_length'],\n\t\t\t\tself.origin_responses: data['resp'], \n\t\t\t\tself.origin_responses_length: data['resp_length'],\n\t\t\t\tself.kgs: data['kg'],\n\t\t\t\tself.kgs_h_length: data['kg_h_length'],\n\t\t\t\tself.kgs_hr_length: data['kg_hr_length'],\n\t\t\t\tself.kgs_hrt_length: data['kg_hrt_length'],\n\t\t\t\tself.kgs_index: data['kg_index'],\n\t\t\t\tself.is_train: False\n\t\t\t\t}\n\t\toutput_feed = [self.generation_index, self.decoder_distribution_teacher,\n\t\t\t\t\t self.decoder_all_loss, self.kg_loss, self.kg_acc]\n\t\treturn session.run(output_feed, input_feed)\n\n\tdef evaluate(self, sess, data, batch_size, key_name):\n\t\tloss = np.zeros((3,))\n\t\ttotal_length = np.zeros((3,))\n\t\tdata.restart(key_name, batch_size=batch_size, shuffle=False)\n\t\tbatched_data = data.get_next_batch(key_name)\n\t\twhile batched_data != None:\n\t\t\tdecoder_loss, _, kg_loss, kg_acc = self.step_decoder(sess, batched_data, forward_only=True)\n\t\t\tlength = np.sum(np.maximum(np.array(batched_data['resp_length']) - 1, 0))\n\t\t\tkg_length = np.sum(np.max(batched_data['kg_index'], axis=-1))\n\t\t\ttotal_length += [length, kg_length, kg_length]\n\t\t\tloss += [decoder_loss * length, kg_loss * kg_length, kg_acc * kg_length]\n\t\t\tbatched_data = data.get_next_batch(key_name)\n\n\t\tloss /= total_length\n\t\tprint('\tperplexity on %s set: %.2f, kg_ppx: %.2f, kg_loss: %.4f, kg_acc: %.4f' % (\n\t\tkey_name, np.exp(loss[0]), np.exp(loss[1]), loss[1], loss[2]))\n\t\treturn loss\n\n\tdef train_process(self, sess, data, args):\n\t\tloss_step, time_step, epoch_step = np.zeros((3,)), .0, 0\n\t\tprevious_losses = [1e18] * 3\n\t\tbest_valid = 1e18\n\t\tdata.restart(\"train\", batch_size=args.batch_size, shuffle=True)\n\t\tbatched_data = data.get_next_batch(\"train\")\n\n\t\tfor i in range(3):\n\t\t\tprint(data.convert_ids_to_tokens(batched_data['post_allvocabs'][i].tolist(), trim=False))\n\t\t\tprint(batched_data['prev_length'][i], batched_data['post_length'][i])\n\t\t\tprint(data.convert_ids_to_tokens(batched_data['post_allvocabs'][i].tolist()[batched_data['prev_length'][i]: batched_data['post_length'][i]], trim=False))\n\t\t\tprint(data.convert_ids_to_tokens(batched_data['resp_allvocabs'][i].tolist(), trim=False))\n\n\t\tfor epoch_step in range(args.epochs):\n\t\t\twhile batched_data != 
None:\n\t\t\t\tif self.global_step.eval() % args.checkpoint_steps == 0 and self.global_step.eval() != 0:\n\t\t\t\t\tprint(\n\t\t\t\t\t\t\"Epoch %d global step %d learning rate %.4f step-time %.2f perplexity: %.2f, kg_ppx: %.2f, kg_loss: %.4f, kg_acc: %.4f\" % (\n\t\t\t\t\t\t\tepoch_step, self.global_step.eval(), self.learning_rate.eval(), time_step, np.exp(loss_step[0]),\n\t\t\t\t\t\t\tnp.exp(loss_step[1]), loss_step[1], loss_step[2]))\n\t\t\t\t\tself.trainSummary(self.global_step.eval() // args.checkpoint_steps,\n\t\t\t\t\t\t\t\t\t {'loss': loss_step[0], 'perplexity': np.exp(loss_step[0])})\n\t\t\t\t\tself.store_checkpoint(sess, '%s/checkpoint_latest/%s' % (args.model_dir, args.name), \"latest\", args.name)\n\n\t\t\t\t\tdev_loss = self.evaluate(sess, data, args.batch_size, \"dev\")\n\t\t\t\t\tself.devSummary(self.global_step.eval() // args.checkpoint_steps,\n\t\t\t\t\t\t\t\t\t{'loss': dev_loss[0], 'perplexity': np.exp(dev_loss[0])})\n\n\t\t\t\t\tif np.sum(loss_step) > max(previous_losses):\n\t\t\t\t\t\tsess.run(self.learning_rate_decay_op)\n\t\t\t\t\tif dev_loss[0] < best_valid:\n\t\t\t\t\t\tbest_valid = dev_loss[0]\n\t\t\t\t\t\tself.store_checkpoint(sess, '%s/checkpoint_best/%s' % (args.model_dir, args.name), \"best\", args.name)\n\n\t\t\t\t\tprevious_losses = previous_losses[1:] + [np.sum(loss_step[0])]\n\t\t\t\t\tloss_step, time_step = np.zeros((3,)), .0\n\n\t\t\t\tstart_time = time.time()\n\t\t\t\tstep_out = self.step_decoder(sess, batched_data)\n\t\t\t\tloss_step += np.array([step_out[0], step_out[3], step_out[4]]) / args.checkpoint_steps\n\t\t\t\ttime_step += (time.time() - start_time) / args.checkpoint_steps\n\t\t\t\tbatched_data = data.get_next_batch(\"train\")\n\n\t\t\tdata.restart(\"train\", batch_size=args.batch_size, shuffle=True)\n\t\t\tbatched_data = data.get_next_batch(\"train\")\n\n\tdef test_process_hits(self, sess, data, args):\n\n\t\twith open(os.path.join(args.datapath, 'test_distractors.json'), 'r', encoding='utf8') as f:\n\t\t\ttest_distractors = json.load(f)\n\n\t\tdata.restart(\"test\", batch_size=1, shuffle=False)\n\t\tbatched_data = data.get_next_batch(\"test\")\n\n\t\tloss_record = []\n\t\tcnt = 0\n\t\twhile batched_data != None:\n\n\t\t\tfor key in batched_data:\n\t\t\t\tif isinstance(batched_data[key], np.ndarray):\n\t\t\t\t\tbatched_data[key] = batched_data[key].tolist()\n\n\t\t\tbatched_data['resp_length'] = [len(batched_data['resp'][0])]\n\t\t\tfor each_resp in test_distractors[cnt]:\n\t\t\t\tbatched_data['resp'].append([data.go_id] + data.convert_tokens_to_ids(jieba.lcut(each_resp)) + [data.eos_id])\n\t\t\t\tbatched_data['resp_length'].append(len(batched_data['resp'][-1]))\n\t\t\tmax_length = max(batched_data['resp_length'])\n\t\t\tresp = np.zeros((len(batched_data['resp']), max_length), dtype=int)\n\t\t\tfor i, each_resp in enumerate(batched_data['resp']):\n\t\t\t\tresp[i, :len(each_resp)] = each_resp\n\t\t\tbatched_data['resp'] = resp\n\n\t\t\tpost = []\n\t\t\tpost_length = []\n\t\t\tprev_length = []\n\n\t\t\tkg = []\n\t\t\tkg_h_length = []\n\t\t\tkg_hr_length = []\n\t\t\tkg_hrt_length = []\n\t\t\tkg_index = []\n\n\t\t\tfor _ in range(len(resp)):\n\t\t\t\tpost += batched_data['post']\n\t\t\t\tpost_length += batched_data['post_length']\n\t\t\t\tprev_length += batched_data['prev_length']\n\n\t\t\t\tkg += batched_data['kg']\n\t\t\t\tkg_h_length += batched_data['kg_h_length']\n\t\t\t\tkg_hr_length += batched_data['kg_hr_length']\n\t\t\t\tkg_hrt_length += batched_data['kg_hrt_length']\n\t\t\t\tkg_index += batched_data['kg_index']\n\n\t\t\tbatched_data['post'] 
= post\n\t\t\tbatched_data['post_length'] = post_length\n\t\t\tbatched_data['prev_length'] = prev_length\n\n\t\t\tbatched_data['kg'] = kg\n\t\t\tbatched_data['kg_h_length'] = kg_h_length\n\t\t\tbatched_data['kg_hr_length'] = kg_hr_length\n\t\t\tbatched_data['kg_hrt_length'] = kg_hrt_length\n\t\t\tbatched_data['kg_index'] = kg_index\n\n\t\t\t_, _, loss, _, _ = self.inference(sess, batched_data)\n\t\t\tloss_record.append(loss)\n\t\t\tcnt += 1\n\t\t\tbatched_data = data.get_next_batch(\"test\")\n\n\t\tassert cnt == len(test_distractors)\n\n\t\tloss = np.array(loss_record)\n\t\tloss_rank = np.argsort(loss, axis=1)\n\t\thits1 = float(np.mean(loss_rank[:, 0] == 0))\n\t\thits3 = float(np.mean(np.min(loss_rank[:, :3], axis=1) == 0))\n\t\treturn {'hits@1' : hits1, 'hits@3': hits3}\n\n\tdef test_process(self, sess, data, args):\n\n\t\tmetric1 = data.get_teacher_forcing_metric()\n\t\tmetric2 = data.get_inference_metric()\n\t\tdata.restart(\"test\", batch_size=args.batch_size, shuffle=False)\n\t\tbatched_data = data.get_next_batch(\"test\")\n\n\t\tfor i in range(3):\n\t\t\tprint(' post %d ' % i, data.convert_ids_to_tokens(batched_data['post_allvocabs'][i].tolist(), trim=False))\n\t\t\tprint(' resp %d ' % i, data.convert_ids_to_tokens(batched_data['resp_allvocabs'][i].tolist(), trim=False))\n\n\t\twhile batched_data != None:\n\t\t\tbatched_responses_id, gen_log_prob, _, _, _ = self.inference(sess, batched_data)\n\t\t\tmetric1_data = {'resp_allvocabs': np.array(batched_data['resp_allvocabs']),\n\t\t\t\t\t\t\t'resp_length': np.array(batched_data['resp_length']), 'gen_log_prob': np.array(gen_log_prob)}\n\t\t\tmetric1.forward(metric1_data)\n\t\t\tbatch_results = []\n\t\t\tfor response_id in batched_responses_id:\n\t\t\t\tresponse_id_list = response_id.tolist()\n\t\t\t\tif data.eos_id in response_id_list:\n\t\t\t\t\tresult_id = response_id_list[:response_id_list.index(data.eos_id) + 1]\n\t\t\t\telse:\n\t\t\t\t\tresult_id = response_id_list\n\t\t\t\tbatch_results.append(result_id)\n\n\t\t\tmetric2_data = {'gen': np.array(batch_results), 'resp_allvocabs': np.array(batched_data['resp_allvocabs'])}\n\t\t\tmetric2.forward(metric2_data)\n\t\t\tbatched_data = data.get_next_batch(\"test\")\n\n\t\tres = metric1.close()\n\t\tres.update(metric2.close())\n\t\tres.update(self.test_process_hits(sess, data, args))\n\n\t\ttest_file = args.out_dir + \"/%s_%s.txt\" % (args.name, \"test\")\n\t\twith open(test_file, 'w') as f:\n\t\t\tprint(\"Test Result:\")\n\t\t\tres_print = list(res.items())\n\t\t\tres_print.sort(key=lambda x: x[0])\n\t\t\tfor key, value in res_print:\n\t\t\t\tif isinstance(value, float):\n\t\t\t\t\tprint(\"\\t%s:\\t%f\" % (key, value))\n\t\t\t\t\tf.write(\"%s:\\t%f\\n\" % (key, value))\n\t\t\tf.write('\\n')\n\t\t\tfor i in range(len(res['resp'])):\n\t\t\t\tf.write(\"resp:\\t%s\\n\" % \" \".join(res['resp'][i]))\n\t\t\t\tf.write(\"gen:\\t%s\\n\\n\" % \" \".join(res['gen'][i]))\n\n\t\tprint(\"result output to %s.\" % test_file)\n\t\treturn {key: val for key, val in res.items() if type(val) in [bytes, int, float]}\n" ]
[ [ "tensorflow.ones", "tensorflow.ones_like", "numpy.exp", "tensorflow.gradients", "numpy.mean", "numpy.min", "tensorflow.clip_by_value", "tensorflow.nn.embedding_lookup", "tensorflow.greater", "tensorflow.nn.softmax", "tensorflow.one_hot", "numpy.max", "tensorflow.trainable_variables", "tensorflow.shape", "tensorflow.concat", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.layers.dense", "tensorflow.split", "numpy.array", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.fill", "tensorflow.contrib.seq2seq.BasicDecoder", "tensorflow.logical_xor", "tensorflow.placeholder", "tensorflow.reduce_sum", "numpy.argsort", "tensorflow.get_variable", "tensorflow.python.ops.nn.dynamic_rnn", "tensorflow.clip_by_global_norm", "tensorflow.sequence_mask", "tensorflow.contrib.seq2seq.dynamic_decode", "numpy.sum", "tensorflow.nn.rnn_cell.GRUCell", "tensorflow.reduce_max", "tensorflow.contrib.seq2seq.TrainingHelper", "tensorflow.reduce_mean", "tensorflow.contrib.seq2seq.AttentionWrapper" ] ]
tuanchien/asd
[ "190c1c6d155b16a27717596d6350598e5cd4ffac" ]
[ "ava_asd/static_audio_model.py" ]
[ "# Copyright 2020 Tuan Chien, James Diprose\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: Tuan Chien, James Diprose\n\nfrom tensorflow.keras.layers import Conv2D, Dense, MaxPool2D, Input, Flatten\nfrom tensorflow.keras.models import Model\n\n\ndef static_audio(shape=(13, 20, 1), weights=None):\n \"\"\"\n Implements the model architecture of the arXiv:1906.10555v1 audio feature tower, i.e., variation of VGGM.\n shape=(nmfcc, timewindow, 1)\n \"\"\"\n inputs = Input(shape=shape, name='a_in') # input\n\n x = Conv2D(64, (3, 3), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv1')(inputs) # conv1\n x = MaxPool2D((1, 1), padding='same', name='s_a_pool1')(x) # pool1\n\n x = Conv2D(192, (3, 3), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv2')(x) # conv2\n x = MaxPool2D((3, 3), padding='same', name='s_a_pool2')(x) # pool2\n\n x = Conv2D(384, (3, 3), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv3')(x) # conv3\n\n x = Conv2D(256, (3, 3), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv4')(x) # conv4\n\n x = Conv2D(256, (3, 3), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv5')(x) # conv5\n x = MaxPool2D((3, 3), padding='same', name='s_a_pool5')(x) # pool5\n\n x = Conv2D(512, (5, 4), padding='same', activation='relu',\n data_format='channels_last', name='s_a_conv6')(x) # conv6\n\n x = Flatten()(x)\n x = Dense(256, activation='relu', name='s_a_fc7')(x) # fc7\n\n outputs = Dense(2, activation='softmax', name='main_out')(x)\n\n model = Model(inputs=inputs, outputs=outputs)\n\n if weights is not None:\n model.load_weights(weights, by_name=True)\n\n return model\n" ]
[ [ "tensorflow.keras.layers.Input", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D" ] ]
ViniciusLima94/frites
[ "270230a2b3abeb676211d3dec54ce6d039eb2f91" ]
[ "frites/core/gcmi_1d.py" ]
[ "\"\"\"\nGaussian copula mutual information estimation.\n\n| **Authors** : Robin AA. Ince\n| **Original code** : https://github.com/robince/gcmi\n| **Reference** :\n| RAA Ince, BL Giordano, C Kayser, GA Rousselet, J Gross and PG Schyns\n\"A statistical framework for neuroimaging data analysis based on mutual\ninformation estimated via a Gaussian copula\" Human Brain Mapping (2017)\n38 p. 1541-1573 doi:10.1002/hbm.23471\n\"\"\"\nimport numpy as np\nimport scipy as sp\n\nfrom frites.core import copnorm_nd, copnorm_cat_nd\n\n\ndef ent_1d_g(x, biascorrect=True):\n \"\"\"Entropy of a Gaussian variable in bits.\n\n H = ent_g(x) returns the entropy of a (possibly multidimensional) Gaussian\n variable x with bias correction.\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_epochs,)\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n\n Returns\n -------\n hx : float\n Entropy of the gaussian variable (in bits)\n \"\"\"\n x = np.atleast_2d(x)\n if x.ndim > 2:\n raise ValueError(\"x must be at most 2d\") # noqa\n nvarx, ntrl = x.shape\n\n # demean data\n x = x - x.mean(axis=1)[:, np.newaxis]\n # covariance\n c = np.dot(x, x.T) / float(ntrl - 1)\n chc = np.linalg.cholesky(c)\n\n # entropy in nats\n hx = np.sum(np.log(np.diagonal(chc))) + .5 * nvarx * (\n np.log(2 * np.pi) + 1.)\n\n ln2 = np.log(2)\n if biascorrect:\n psiterms = sp.special.psi((ntrl - np.arange(1, nvarx + 1).astype(\n float)) / 2.) / 2.\n dterm = (ln2 - np.log(ntrl - 1.)) / 2.\n hx = hx - nvarx * dterm - psiterms.sum()\n\n # convert to bits\n return hx / ln2\n\n\ndef mi_1d_gg(x, y, relative=False, biascorrect=True, demeaned=False):\n \"\"\"Mutual information (MI) between two Gaussian variables in bits.\n\n I = mi_gg(x,y) returns the MI between two (possibly multidimensional)\n Gaussian variables, x and y, with bias correction.\n\n Parameters\n ----------\n x, y : array_like\n Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs)\n relative: bool | False\n If true returns the normalized MI (MI(x,y)/H(x))\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n\n Returns\n -------\n i : float\n Information shared by x and y in bits or undmensional if\n relative is True\n \"\"\"\n x, y = np.atleast_2d(x), np.atleast_2d(y)\n if (x.ndim > 2) or (y.ndim > 2):\n raise ValueError(\"x and y must be at most 2d\")\n nvarx, ntrl = x.shape\n nvary = y.shape[0]\n nvarxy = nvarx + nvary\n\n if y.shape[1] != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # joint variable\n xy = np.vstack((x, y))\n if not demeaned:\n xy = xy - xy.mean(axis=1)[:, np.newaxis]\n cxy = np.dot(xy, xy.T) / float(ntrl - 1)\n # submatrices of joint covariance\n cx = cxy[:nvarx, :nvarx]\n cy = cxy[nvarx:, nvarx:]\n\n chcxy = np.linalg.cholesky(cxy)\n chcx = np.linalg.cholesky(cx)\n chcy = np.linalg.cholesky(cy)\n\n # entropies in nats\n # normalizations cancel for mutual information\n hx = np.sum(np.log(np.diagonal(chcx)))\n hy = np.sum(np.log(np.diagonal(chcy)))\n hxy = np.sum(np.log(np.diagonal(chcxy)))\n\n ln2 = np.log(2)\n if biascorrect:\n psiterms = sp.special.psi(\n (ntrl - np.arange(1, nvarxy + 1)).astype(float) / 2.) 
/ 2.\n dterm = (ln2 - np.log(ntrl - 1.)) / 2.\n hx = hx - nvarx * dterm - psiterms[:nvarx].sum()\n hy = hy - nvary * dterm - psiterms[:nvary].sum()\n hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()\n\n i = hx + hy - hxy\n if relative:\n i = i / hx\n else:\n i = i / ln2\n # MI in bits\n return i\n\n\ndef gcmi_1d_cc(x, y, relative=False):\n \"\"\"Gaussian-Copula MI between two continuous variables.\n\n I = gcmi_cc(x,y) returns the MI between two (possibly multidimensional)\n continuous variables, x and y, estimated via a Gaussian copula.\n\n Parameters\n ----------\n x, y : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs)\n relative: bool | False\n If true returns the normalized MI (MI(x,y)/H(x))\n\n Returns\n -------\n i : float\n Information shared by x and y in bits or undmensional if\n relative is True\n \"\"\"\n x, y = np.atleast_2d(x), np.atleast_2d(y)\n if x.ndim > 2 or y.ndim > 2:\n raise ValueError(\"x and y must be at most 2d\")\n nvarx, ntrl = x.shape\n\n if y.shape[1] != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # copula normalization\n cx, cy = copnorm_nd(x, axis=1), copnorm_nd(y, axis=1)\n # parametric Gaussian MI\n return mi_1d_gg(cx, cy, relative, True, True)\n\n\ndef mi_model_1d_gd(x, y, relative=False, biascorrect=True, demeaned=False):\n \"\"\"Mutual information between a Gaussian and a discrete variable in bits.\n\n This method is based on ANOVA style model comparison.\n I = mi_model_gd(x,y) returns the MI between the (possibly multidimensional)\n Gaussian variable x and the discrete variable y.\n\n Parameters\n ----------\n x, y : array_like\n Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y\n must be an array of integers\n relative: bool | False\n If true returns the normalized MI (MI(x,y)/H(x))\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n\n Returns\n -------\n i : float\n Information shared by x and y in bits or undmensional if\n relative is True\n \"\"\"\n x, y = np.atleast_2d(x), np.squeeze(y)\n if x.ndim > 2:\n raise ValueError(\"x must be at most 2d\")\n if y.ndim > 1:\n raise ValueError(\"only univariate discrete variables supported\")\n if not np.issubdtype(y.dtype, int):\n raise ValueError(\"y should be an integer array\")\n\n nvarx, ntrl = x.shape\n ym = np.unique(y)\n\n if y.size != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n if not demeaned:\n x = x - x.mean(axis=1)[:, np.newaxis]\n\n # class-conditional entropies\n ntrl_y = np.zeros(len(ym))\n hcond = np.zeros(len(ym))\n for n_yi, yi in enumerate(ym):\n idx = y == yi\n xm = x[:, idx]\n ntrl_y[n_yi] = xm.shape[1]\n xm = xm - xm.mean(axis=1)[:, np.newaxis]\n cm = np.dot(xm, xm.T) / float(ntrl_y[n_yi] - 1)\n chcm = np.linalg.cholesky(cm)\n hcond[n_yi] = np.sum(np.log(np.diagonal(chcm)))\n\n # class weights\n w = ntrl_y / float(ntrl)\n\n # unconditional entropy from unconditional Gaussian fit\n cx = np.dot(x, x.T) / float(ntrl - 1)\n chc = np.linalg.cholesky(cx)\n hunc = np.sum(np.log(np.diagonal(chc))) # + c*nvarx\n\n ln2 = np.log(2)\n if biascorrect:\n vars = np.arange(1, nvarx + 1)\n\n psiterms = sp.special.psi((ntrl - vars).astype(float) / 2.) 
/ 2.\n dterm = (ln2 - np.log(float(ntrl - 1))) / 2.\n hunc = hunc - nvarx * dterm - psiterms.sum()\n\n dterm = (ln2 - np.log((ntrl_y - 1).astype(float))) / 2.0\n psiterms = np.zeros(len(ym))\n for vi in vars:\n idx = ntrl_y - vi\n psiterms = psiterms + sp.special.psi(idx.astype(float) / 2.)\n hcond = hcond - nvarx * dterm - (psiterms / 2.)\n\n i = hunc - np.sum(w * hcond)\n if relative:\n i = i / hunc\n else:\n i = i / ln2\n\n # MI in bits\n return i\n\n\ndef gcmi_model_1d_cd(x, y, relative=False):\n \"\"\"Gaussian-Copula MI between a continuous and a discrete variable.\n\n This method is based on ANOVA style model comparison.\n I = gcmi_model_cd(x,y,Ym) returns the MI between the (possibly\n multidimensional) continuous variable x and the discrete variable y.\n\n Parameters\n ----------\n x, y : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y\n must be an array of integers\n relative: bool | False\n If true returns the normalized MI (MI(x,y)/H(x))\n\n Returns\n -------\n i : float\n Information shared by x and y in bits or undmensional if\n relative is True\n \"\"\"\n x, y = np.atleast_2d(x), np.squeeze(y)\n if x.ndim > 2:\n raise ValueError(\"x must be at most 2d\")\n if y.ndim > 1:\n raise ValueError(\"only univariate discrete variables supported\")\n if not np.issubdtype(y.dtype, int):\n raise ValueError(\"y should be an integer array\")\n\n nvarx, ntrl = x.shape\n\n if y.size != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # copula normalization\n cx = copnorm_nd(x, axis=1)\n # parametric Gaussian MI\n return mi_model_1d_gd(cx, y, relative, True, True)\n\n\ndef mi_mixture_1d_gd(x, y, relative=False):\n \"\"\"Mutual information between a Gaussian and a discrete variable in bits.\n\n This method evaluate MI from a Gaussian mixture.\n I = mi_mixture_gd(x,y) returns the MI between the (possibly\n multidimensional)\n\n Parameters\n ----------\n x, y : array_like\n Gaussian arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y\n must be an array of integers\n relative: bool | False\n If true returns the normalized MI (MI(x,y)/H(x))\n\n Returns\n -------\n i : float\n Information shared by x and y in bits or undmensional if\n relative is True\n \"\"\"\n x, y = np.atleast_2d(x), np.squeeze(y)\n if x.ndim > 2:\n raise ValueError(\"x must be at most 2d\")\n if y.ndim > 1:\n raise ValueError(\"only univariate discrete variables supported\")\n if not np.issubdtype(y.dtype, int):\n raise ValueError(\"y should be an integer array\")\n\n nvarx, ntrl = x.shape\n ym = np.unique(y)\n\n if y.size != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # class-conditional entropies\n ntrl_y = np.zeros((len(ym),))\n hcond = np.zeros((len(ym),))\n m = np.zeros((len(ym), nvarx))\n w = np.zeros((len(ym),))\n cc = .5 * (np.log(2. 
* np.pi) + 1)\n c = np.zeros((len(ym), nvarx, nvarx))\n chc = np.zeros((len(ym), nvarx, nvarx))\n for n_yi, yi in enumerate(ym):\n # class conditional data\n idx = y == yi\n xm = x[:, idx]\n # class mean\n m[n_yi, :] = xm.mean(axis=1)\n ntrl_y[n_yi] = xm.shape[1]\n\n xm = xm - m[n_yi, :][:, np.newaxis]\n c[n_yi, :, :] = np.dot(xm, xm.T) / float(ntrl_y[n_yi] - 1)\n chc[n_yi, :, :] = np.linalg.cholesky(c[n_yi, :, :])\n hcond[n_yi] = np.sum(np.log(np.diagonal(chc[n_yi, :, :]))) + cc * nvarx\n\n # class weights\n w = ntrl_y / float(ntrl)\n\n # mixture entropy via unscented transform\n # See:\n # Huber, Bailey, Durrant-Whyte and Hanebeck\n # \"On entropy approximation for Gaussian mixture random vectors\"\n # http://dx.doi.org/10.1109/MFI.2008.4648062\n\n # Goldberger, Gordon, Greenspan\n # \"An efficient image similarity measure based on approximations of\n # KL-divergence between two Gaussian mixtures\"\n # http://dx.doi.org/10.1109/ICCV.2003.1238387\n d = nvarx\n ds = np.sqrt(nvarx)\n hmix = 0.0\n for yi in range(len(ym)):\n ps = ds * chc[yi, :, :].T\n thsm = m[yi, :, np.newaxis]\n # unscented points for this class\n usc = np.hstack([thsm + ps, thsm - ps])\n\n # class log-likelihoods at unscented points\n log_lik = np.zeros((len(ym), 2 * nvarx))\n for mi in range(len(ym)):\n # demean points\n dx = usc - m[mi, :, np.newaxis]\n # gaussian likelihood\n log_lik[mi, :] = _norm_innerv(\n dx, chc[mi, :, :]) - hcond[mi] + .5 * nvarx\n\n # log mixture likelihood for these unscented points\n # sum over classes, axis=0\n # logmixlik = sp.misc.logsumexp(log_lik, axis=0, b=w[:, np.newaxis])\n logmixlik = np.log(np.sum(w[:, np.newaxis] * np.exp(log_lik)))\n\n # add to entropy estimate (sum over unscented points for this class)\n hmix = hmix + w[yi] * logmixlik.sum()\n\n hmix = -hmix / (2 * d)\n\n i = hmix - np.sum(w * hcond)\n if relative:\n i = i / hmix\n else:\n i = i / ln2\n\n return i\n\n\ndef _norm_innerv(x, chc):\n \"\"\"Normalised innervations.\"\"\"\n m = np.linalg.solve(chc, x)\n w = -0.5 * (m * m).sum(axis=0)\n return w\n\n\ndef gcmi_mixture_1d_cd(x, y, relative=False):\n \"\"\"Gaussian-Copula MI between a continuous and a discrete variable.\n\n This method evaluate MI from a Gaussian mixture.\n\n The Gaussian mixture is fit using robust measures of location (median) and\n scale (median absolute deviation) for each class.\n I = gcmi_mixture_cd(x,y) returns the MI between the (possibly\n multidimensional).\n\n Parameters\n ----------\n x, y : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y\n must be an array of integers\n\n Returns\n -------\n i : float\n Information shared by x and y (in bits)\n \"\"\"\n x, y = np.atleast_2d(x), np.squeeze(y)\n if x.ndim > 2:\n raise ValueError(\"x must be at most 2d\")\n if y.ndim > 1:\n raise ValueError(\"only univariate discrete variables supported\")\n if not np.issubdtype(y.dtype, int):\n raise ValueError(\"y should be an integer array\")\n\n nvarx, ntrl = x.shape\n ym = np.unique(y)\n\n if y.size != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # copula normalise each class\n # shift and rescale to match loc and scale of raw data\n # this provides a robust way to fit the gaussian mixture\n classdat = []\n ydat = []\n for yi in ym:\n # class conditional data\n idx = y == yi\n xm = x[:, idx]\n cxm = copnorm_nd(xm, axis=1)\n\n xmmed = np.median(xm, axis=1)[:, np.newaxis]\n # robust measure of s.d. 
under Gaussian assumption from median\n # absolute deviation\n xmmad = np.median(np.abs(xm - xmmed), axis=1)[:, np.newaxis]\n cxmscaled = cxm * (1.482602218505602 * xmmad)\n # robust measure of loc from median\n cxmscaled = cxmscaled + xmmed\n classdat.append(cxmscaled)\n ydat.append(yi * np.ones(xm.shape[1], dtype=int))\n\n cx = np.concatenate(classdat, axis=1)\n newy = np.concatenate(ydat)\n return mi_mixture_1d_gd(cx, newy, relative)\n\n\ndef cmi_1d_ggg(x, y, z, biascorrect=True, demeaned=False):\n \"\"\"Conditional MI between two Gaussian variables conditioned on a third.\n\n I = cmi_ggg(x,y,z) returns the CMI between two (possibly multidimensional)\n Gaussian variables, x and y, conditioned on a third, z, with bias\n correction.\n\n Parameters\n ----------\n x, y, z : array_like\n Gaussians arrays of shape (n_epochs,) or (n_dimensions, n_epochs).\n biascorrect : bool | True\n Specifies whether bias correction should be applied to the estimated MI\n demeaned : bool | False\n Specifies whether the input data already has zero mean (true if it has\n been copula-normalized)\n\n Returns\n -------\n i : float\n Information shared by x and y conditioned by z (in bits)\n \"\"\"\n x, y, z = np.atleast_2d(x), np.atleast_2d(y), np.atleast_2d(z)\n if x.ndim > 2 or y.ndim > 2 or z.ndim > 2:\n raise ValueError(\"x, y and z must be at most 2d\")\n ntrl = x.shape[1]\n nvarx = x.shape[0]\n nvary = y.shape[0]\n nvarz = z.shape[0]\n nvaryz = nvary + nvarz\n nvarxy = nvarx + nvary\n nvarxz = nvarx + nvarz\n nvarxyz = nvarx + nvaryz\n\n if y.shape[1] != ntrl or z.shape[1] != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # joint variable\n xyz = np.vstack((x, y, z))\n if not demeaned:\n xyz = xyz - xyz.mean(axis=1)[:, np.newaxis]\n cxyz = np.dot(xyz, xyz.T) / float(ntrl - 1)\n # submatrices of joint covariance\n cz = cxyz[nvarxy:, nvarxy:]\n cyz = cxyz[nvarx:, nvarx:]\n cxz = np.zeros((nvarxz, nvarxz))\n cxz[:nvarx, :nvarx] = cxyz[:nvarx, :nvarx]\n cxz[:nvarx, nvarx:] = cxyz[:nvarx, nvarxy:]\n cxz[nvarx:, :nvarx] = cxyz[nvarxy:, :nvarx]\n cxz[nvarx:, nvarx:] = cxyz[nvarxy:, nvarxy:]\n\n chcz = np.linalg.cholesky(cz)\n chcxz = np.linalg.cholesky(cxz)\n chcyz = np.linalg.cholesky(cyz)\n chcxyz = np.linalg.cholesky(cxyz)\n\n # entropies in nats\n # normalizations cancel for cmi\n hz = np.sum(np.log(np.diagonal(chcz)))\n hxz = np.sum(np.log(np.diagonal(chcxz)))\n hyz = np.sum(np.log(np.diagonal(chcyz)))\n hxyz = np.sum(np.log(np.diagonal(chcxyz)))\n\n ln2 = np.log(2)\n if biascorrect:\n psiterms = sp.special.psi(\n (ntrl - np.arange(1, nvarxyz + 1)).astype(float) / 2.) 
/ 2.\n dterm = (ln2 - np.log(ntrl - 1.)) / 2.\n hz = hz - nvarz * dterm - psiterms[:nvarz].sum()\n hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()\n hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()\n hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()\n\n # MI in bits\n i = (hxz + hyz - hxyz - hz) / ln2\n return i\n\n\ndef gccmi_1d_ccc(x, y, z, biascorrect=True):\n \"\"\"Gaussian-Copula CMI between three continuous variables.\n\n I = gccmi_1d_ccc(x,y,z) returns the CMI between two (possibly\n multidimensional) continuous variables, x and y, conditioned on a third, z,\n estimated via a Gaussian copula.\n\n Parameters\n ----------\n x, y, z : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).\n\n Returns\n -------\n i : float\n Information shared by x and y conditioned by z (in bits)\n \"\"\"\n x, y, z = np.atleast_2d(x), np.atleast_2d(y), np.atleast_2d(z)\n if x.ndim > 2 or y.ndim > 2 or z.ndim > 2:\n raise ValueError(\"x, y and z must be at most 2d\")\n\n nvarx, ntrl = x.shape\n\n if y.shape[1] != ntrl or z.shape[1] != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # copula normalization\n cx = copnorm_nd(x, axis=1)\n cy = copnorm_nd(y, axis=1)\n cz = copnorm_nd(z, axis=1)\n # parametric Gaussian CMI\n return cmi_1d_ggg(cx, cy, cz, biascorrect=True, demeaned=True)\n\n\ndef cmi_1d_ggd(x, y, z, biascorrect=True, demeaned=False):\n \"\"\"MI between 2 continuous variables conditioned on a discrete variable.\n\n I = cmi_1d_ggd(x,y,z) returns the CMI between two (possibly\n multidimensional) continuous variables, x and y, conditioned on a third\n discrete variable z, estimated via a Gaussian copula.\n\n Parameters\n ----------\n x, y : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).\n z : array_like\n Discret array of shape (n_epochs,)\n\n Returns\n -------\n cmi : float\n Conditional Mutual Information shared by x and y conditioned by z\n (in bits)\n \"\"\"\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n if x.ndim > 2 or y.ndim > 2:\n raise ValueError(\"x and y must be at most 2d\")\n if z.ndim > 1:\n raise ValueError(\"only univariate discrete variables supported\")\n if not np.issubdtype(z.dtype, int):\n raise ValueError(\"z should be an integer array\")\n\n nvarx, ntrl = x.shape\n u_z = np.unique(z)\n\n if y.shape[1] != ntrl or z.size != ntrl:\n raise ValueError(\"number of trials do not match\")\n\n # calculate gcmi for each z value\n icond = np.zeros((len(u_z),))\n pz = np.zeros((len(u_z),))\n for n_z, zi in enumerate(u_z):\n idx = z == zi\n thsx, thsy = x[:, idx], y[:, idx]\n pz[n_z] = idx.sum()\n icond[n_z] = mi_1d_gg(thsx, thsy, biascorrect=biascorrect,\n demeaned=demeaned)\n\n pz /= float(ntrl)\n\n # conditional mutual information\n cmi = np.sum(pz * icond)\n return cmi\n\n\ndef gccmi_1d_ccd(x, y, z, biascorrect=True, demeaned=False):\n \"\"\"GCCMI between 2 continuous variables conditioned on a discrete variable.\n\n I = gccmi_ccd(x,y,z) returns the CMI between two (possibly\n multidimensional) continuous variables, x and y, conditioned on a third\n discrete variable z, estimated via a Gaussian copula.\n\n Parameters\n ----------\n x, y : array_like\n Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).\n z : array_like\n Discret array of shape (n_epochs,)\n\n Returns\n -------\n cmi : float\n Conditional Mutual Information shared by x and y conditioned by z\n (in bits)\n \"\"\"\n x, y = np.atleast_2d(x), np.atleast_2d(y)\n x = copnorm_cat_nd(x, z, axis=-1)\n y = 
copnorm_cat_nd(y, z, axis=-1)\n return cmi_1d_ggd(x, y, z, biascorrect=biascorrect, demeaned=True)\n" ]
[ [ "numpy.dot", "numpy.median", "numpy.exp", "numpy.issubdtype", "numpy.concatenate", "numpy.log", "numpy.arange", "numpy.sqrt", "numpy.linalg.cholesky", "numpy.vstack", "numpy.atleast_2d", "numpy.zeros", "numpy.hstack", "numpy.squeeze", "numpy.sum", "numpy.ones", "numpy.diagonal", "numpy.linalg.solve", "numpy.abs", "numpy.unique" ] ]
shilpiBose29/CIS519-Project
[ "e2ce2f3c7e6e313d262d049721c8314ef5595dfa" ]
[ "top_users/find_reviews_by_top_users.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 23 09:56:46 2017\n\n@author: arranzignacio\n\"\"\"\n\nimport pandas as pd\n\nfilename = 'all_reviews.csv'\nall_cities = pd.read_csv(filename, sep=',', header=0)\n\n#unique_listings = all_cities.listing_id.unique()\n#unique_reviewers = all_cities.reviewer_id.unique()\n#unique_reviews = all_cities.id.unique()\n\nreviewers = all_cities.groupby(['reviewer_id'])['id'].count().reset_index()\n\n#create files with repeat users\n\nrepetitions = [2,3,4,7,10,15]\n\nindice = [1,2,3,6,9,14]\n\nfor i in range(len(indice)):\n top_users = reviewers.loc[reviewers['id'] > indice[i]]\n \n users_list = top_users.reviewer_id\n \n reviews_reduced = all_cities[all_cities.reviewer_id.isin(users_list)]\n \n file_name = \"reviews_by_top_users_\"+str(repetitions[i])+\"_reviews.csv\"\n\n reviews_reduced.to_csv(file_name, sep=',', encoding='utf-8')\n print(i)\n\nprint('done')\n\n" ]
[ [ "pandas.read_csv" ] ]
cion94/nlp
[ "4a81948a64c0f1c1e7036c25c3a5e9a89b2d074d" ]
[ "labs/lab6.py" ]
[ "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport pandas as pd\nimport csv\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\n\n# %%\nglove_data_file = \"glove.6B/glove.6B.50d.txt\"\n\n\n# %%\ndf = pd.read_csv(glove_data_file, sep=\" \", index_col=0, header=None, quoting=csv.QUOTE_NONE)\n\n\n# %%\nn_components = 2\n\n\n# %%\ndfsample = df[:50]\n\n\n# %%\ndfsample.index.values\n\n\n# %%\npca = PCA(n_components=n_components)\ncomponents = pca.fit_transform(dfsample)\n\n# %% [markdown]\n# ## PCA in two dimensions\n\n# %%\nplt.scatter(components[:,0], components[:,1])\n\nfor i in range(components.shape[0]):\n plt.annotate(df.index.values[i], (components[i,0], components[i,1]))\n\n\n# %%\ncomponents.shape[0]\n\n\n# %%\nnparr = dfsample.to_numpy()\n\n# %% [markdown]\n# ## Plot with 2 random dimensions\n\n# %%\nplt.scatter(nparr[:,5], nparr[:,7])\n\nfor i in range(components.shape[0]):\n plt.annotate(df.index.values[i], (nparr[i,5], nparr[i,7]))\n\n# %% [markdown]\n# \n\n# %%\n\n\n\n" ]
[ [ "matplotlib.pyplot.scatter", "pandas.read_csv", "matplotlib.pyplot.annotate", "sklearn.decomposition.PCA" ] ]
Bhaskers-Blu-Org1/visual-insights-custom-models
[ "a92ea6e537c8fc4aa92d9298e628e68a1716a346" ]
[ "object_detection/src/train.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSource: https://www.ibm.com/support/knowledgecenter/SSRU69_1.2.0/base/vision_prepare_custom_train.html\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\n# import tensorflow as tf\nimport keras\nfrom sklearn.model_selection import train_test_split\nimport logging as log\nimport os\n\nBATCH_SIZE = 16\n\n# Import required by Visual Insights\nfrom train_interface import TrainCallback\n\n# Import SSD7 model package\nimport SSD7\n\n\nclass MyTrain(TrainCallback):\n def __init__(self):\n log.info(\"CALL MyTrain.__init__\")\n # Define maximum image size (width, height)\n # Images will NOT be resized and must be all of the same size (see load_img method)\n # If you want to resize them, make sure to update their labels accordingly\n self.img_size = (500, 660)\n\n\n def onPreprocessing(self, labels, images, workspace_path, params):\n \"\"\" \n Callback for dataset preprocessing \n Params:\n labels: dict of \"category\" -> index\n images: \n if classification, dict of \"image path\" -> \"category\"\n if detection, list of annotation objects\n (attributes: 'filename', 'size', 'object': list of boxes, each with attrs 'label' and 'bbox')\n workspace_path: recommended temporary workspace\n params: dict of \"parameter\" -> value\n Return: None\n \"\"\"\n log.info(\"CALL MyTrain.onPreprocessing\")\n log.info(\"params: %s\", params)\n # store parameters (defined\n self.params = params\n\n # Define custom model architecture #############################################\n # model from https://github.com/pierluigiferrari/ssd_keras\n self.model = SSD7.build_model(image_size=self.img_size + (3,), n_classes=len(labels))\n adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\n self.model.compile(optimizer=adam, loss=SSD7.SSDLoss().compute_loss)\n \n # display model architecture summary in logs\n self.model.summary(print_fn=log.info)\n\n\n # Load and preprocess data #####################################################\n X = []\n y = []\n log.info(\"Loading images...\")\n for img in images:\n with Image.open(img.filename) as img_data:\n X.append(np.array(img_data, dtype=np.uint8))\n # class 0 is for background, shift labels of 1\n y.append(np.array([[labels[lbl.label] + 1] + lbl.bbox for lbl in img.objects]))\n\n # Encode input data for SSD\n predictor_sizes = [self.model.get_layer('classes4').output_shape[1:3],\n self.model.get_layer('classes5').output_shape[1:3],\n self.model.get_layer('classes6').output_shape[1:3],\n self.model.get_layer('classes7').output_shape[1:3]]\n\n self.ssd_input_encoder = SSD7.SSDInputEncoder(\n img_height=self.img_size[0],\n img_width=self.img_size[1],\n n_classes=len(labels),\n predictor_sizes=predictor_sizes)\n \n X = np.array(X)\n y_encoded = np.array(self.ssd_input_encoder(y))\n\n # split data -> 20% test / 80% train (using sklearn function)\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y_encoded, test_size=0.2)\n self.dataset_size = self.X_train.shape[0]\n\n log.info(\"self.X_train.shape = %s\", self.X_train.shape)\n log.info(\"self.X_test.shape = %s\", self.X_test.shape)\n log.info(\"self.y_train.shape = %s\", self.y_train.shape)\n log.info(\"self.y_test.shape = %s\", self.y_test.shape)\n\n\n\n def onTraining(self, monitor_handler):\n \"\"\"\n Callback for training\n Params:\n monitor_handler: MonitorHandler object for train/test status monitoring\n (see https://www.ibm.com/support/knowledgecenter/SSRU69_1.2.0/base/vision_custom_api.html\n section 
\"Monitoring and reporting statistics\")\n Return: None\n \"\"\"\n log.info(\"CALL MyTrain.onTraining\")\n\n # function that takes logs (dictionnary containing loss and accuracy values)\n # and calls the monitor_handler methods to update metrics:\n # * training loss (in updateTrainMetrics)\n # * testing loss and accuracy (in updateTestMetrics)\n # Note: the SSD7 does not give accuracy, we log 0.0 instead\n # allowing live graph plot in Visual Insights during training\n def logMetrics(epoch, logs):\n current_iter = (epoch + 1) * self.dataset_size / BATCH_SIZE\n monitor_handler.updateTrainMetrics(\n current_iter,\n int(self.params[\"max_iter\"]),\n 0.0, # loss_cls\n logs[\"loss\"], # loss_bbox\n epoch+1)\n # If you compute an accuracy (mean average precision) on the test set, you can report it here\n # monitor_handler.updateTestMetrics(my_accuracy)\n\n # launch training using the data we loaded in `onPreprocessing`\n # at the end of each epoch, call the `logMetrics` function as a callback\n # see https://keras.io/callbacks/\n self.model.fit(self.X_train, self.y_train, batch_size=BATCH_SIZE, \n epochs=int(int(self.params[\"max_iter\"]) * BATCH_SIZE / self.dataset_size),\n validation_data=(self.X_test, self.y_test),\n callbacks=[keras.callbacks.LambdaCallback(on_epoch_end=logMetrics)])\n\n\n\n\n def onCompleted(self, model_path):\n \"\"\"\n Callback for successful training completion -> used to save model\n Params:\n model_path: absolute model filepath\n Return:\n None\n \"\"\"\n # if training successful then store the resulting model\n model_file = os.path.join(model_path, \"model.h5\")\n log.info(\"CALL MyTrain.onCompleted\")\n log.info(\"[model_file] Saving model to %s\", model_file)\n self.model.save(model_file)\n\n\n def onFailed(self, train_status, e, tb_message):\n \"\"\" \n Callback for failed training completion\n Params:\n train_status: training status when failure occurred\n e: Exception object\n tb_message: formatted traceback\n Return: None\n \"\"\"\n # if training failed then log and raise the error \n log.info(\"CALL MyTrain.onFailed\")\n log.error(\"Train status: %s\", train_status)\n log.error(\"Traceback message: %s\", tb_message)\n log.exception(e)\n\n\n def load_img(self, path):\n # given an image path, load the image\n # returns a numpy.array of the shape of the image\n img = np.array(Image.open(path), dtype=np.uint8)\n assert(img.shape == self.img_size + (3,))\n return img\n\n" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.array" ] ]
lc0/adanet
[ "b5007ae556a7387b9bda340fdf82649edd740a9e" ]
[ "adanet/core/estimator.py" ]
[ "\"\"\"An AdaNet estimator implementation in Tensorflow using a single graph.\n\nCopyright 2018 The AdaNet Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport errno\nimport inspect\nimport os\nimport time\n\nfrom absl import logging\nfrom adanet import tf_compat\nfrom adanet.core.architecture import _Architecture\nfrom adanet.core.candidate import _CandidateBuilder\nfrom adanet.core.ensemble_builder import _EnsembleBuilder\nfrom adanet.core.ensemble_builder import _SubnetworkManager\nfrom adanet.core.eval_metrics import call_eval_metrics\nfrom adanet.core.iteration import _IterationBuilder\nfrom adanet.core.report_accessor import _ReportAccessor\nfrom adanet.core.summary import _ScopedSummary\nfrom adanet.core.summary import _TPUScopedSummary\nfrom adanet.core.timer import _CountDownTimer\nfrom adanet.distributed import ReplicationStrategy\nfrom adanet.distributed.devices import monkey_patch_default_variable_placement_strategy\nfrom adanet.ensemble import ComplexityRegularizedEnsembler\nfrom adanet.ensemble import GrowStrategy\nimport numpy as np\nimport six\nimport tensorflow as tf\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.util import deprecation\nfrom tensorflow_estimator.python.estimator import util\n# pylint: enable=g-direct-tensorflow-import\n\n\nclass _StopAfterTrainingHook(tf_compat.SessionRunHook):\n \"\"\"Hook that requests stop once iteration is over.\"\"\"\n\n def __init__(self, iteration, after_fn):\n \"\"\"Initializes a `_StopAfterTrainingHook`.\n\n Args:\n iteration: An `_Iteration` instance.\n after_fn: A function to call after training stopped.\n\n Returns:\n A `_StopAfterTrainingHook` instance.\n \"\"\"\n\n self._iteration = iteration\n self._after_fn = after_fn\n\n def before_run(self, run_context):\n \"\"\"See `SessionRunHook`.\"\"\"\n\n self._stop_if_is_over(run_context)\n\n def after_run(self, run_context, run_values):\n \"\"\"See `SessionRunHook`.\"\"\"\n\n self._stop_if_is_over(run_context)\n\n def _stop_if_is_over(self, run_context):\n \"\"\"Signals the monitored session to step when the iteration is over.\"\"\"\n\n if not self._iteration.train_manager.is_over():\n return\n logging.info(\"Now stopping iteration %d training\", self._iteration.number)\n run_context.request_stop()\n self._after_fn()\n\n\nclass _EvalMetricSaverHook(tf_compat.SessionRunHook):\n \"\"\"A hook for writing candidate evaluation metrics as summaries to disk.\"\"\"\n\n def __init__(self, name, kind, eval_metrics, output_dir):\n \"\"\"Initializes a `_EvalMetricSaverHook` instance.\n\n Args:\n name: String name of candidate owner of these metrics.\n kind: The kind of candidate that the metrics belong to (e.g. subnetwork).\n eval_metrics: Tuple of (metric_fn, tensors) which returns a dict of metric\n results keyed by name. 
The values of the dict are the results of calling\n a metric function, namely a `(metric_tensor, update_op)` tuple.\n `metric_tensor` should be evaluated without any impact on state\n (typically is a pure computation based on variables.). For example, it\n should not trigger the `update_op` or require any input fetching.\n output_dir: Directory for writing evaluation summaries.\n\n Returns:\n An `_EvalMetricSaverHook` instance.\n \"\"\"\n\n self._name = name\n self._kind = kind\n self._eval_metrics = eval_metrics\n self._output_dir = output_dir\n\n def begin(self):\n \"\"\"See `SessionRunHook`.\"\"\"\n\n # The metric_fn is called with tf.placeholders to simply read the value of\n # the metric variables. The metrics themselves are computed as a result of\n # being returned in the EstimatorSpec by _adanet_model_fn.\n metric_fn, tensors = self._eval_metrics\n tensors = [tf_compat.v1.placeholder(t.dtype, t.shape) for t in tensors]\n eval_metric_ops = metric_fn(*tensors)\n self._eval_metric_tensors = {}\n for key in sorted(eval_metric_ops):\n value = tf_compat.metric_op(eval_metric_ops[key])\n self._eval_metric_tensors[key] = value[0]\n\n def _dict_to_str(self, dictionary):\n \"\"\"Get a `str` representation of a `dict`.\n\n Args:\n dictionary: The `dict` to be represented as `str`.\n\n Returns:\n A `str` representing the `dictionary`.\n \"\"\"\n return \", \".join(\n \"{} = {}\".format(k, v) for k, v in sorted(dictionary.items()))\n\n def end(self, session):\n \"\"\"See `SessionRunHook`.\"\"\"\n\n # Forked from tensorflow/python/estimator/estimator.py function called\n # _write_dict_to_summary.\n current_global_step = tf_compat.v1.train.get_global_step()\n eval_dict, current_global_step = session.run(\n (self._eval_metric_tensors, current_global_step))\n\n logging.info(\"Saving %s '%s' dict for global step %d: %s\", self._kind,\n self._name, current_global_step, self._dict_to_str(eval_dict))\n summary_writer = tf_compat.v1.summary.FileWriterCache.get(self._output_dir)\n summary_proto = tf_compat.v1.summary.Summary()\n for key in eval_dict:\n value = eval_dict[key]\n if isinstance(value, (np.float32, float)):\n summary_proto.value.add(tag=key, simple_value=float(value))\n elif isinstance(value, six.binary_type):\n summ = tf_compat.v1.summary.Summary.FromString(value)\n for i, _ in enumerate(summ.value):\n summ.value[i].tag = \"{}/{}\".format(key, i)\n summary_proto.value.extend(summ.value)\n else:\n logging.warn(\n \"Skipping summary for %s, must be a float, np.float32, \"\n \"or a serialized string of Summary.\", key)\n summary_writer.add_summary(summary_proto, current_global_step)\n summary_writer.flush()\n # Note(b/137672676): Do not explicitly call summary_writer.close() here.\n # This will cause eval summaries to not be written out after the first time\n # in continuous evals.\n\n\nclass _OverwriteCheckpointHook(tf_compat.SessionRunHook):\n \"\"\"Hook to overwrite the latest checkpoint with next iteration variables.\"\"\"\n\n def __init__(self, current_iteration, iteration_number_tensor,\n previous_iteration_vars, config):\n \"\"\"Initializes an _OverwriteCheckpointHook instance.\n\n Args:\n current_iteration: Current `_Iteration` object.\n iteration_number_tensor: Int variable `Tensor` storing the current\n iteration number.\n previous_iteration_vars: Variables to restore from the previous iteration\n before overwriting the checkpoint.\n config: The Estimator's RunConfig object.\n \"\"\"\n\n self._iteration_number = current_iteration.number\n self._iteration_number_tensor = 
iteration_number_tensor\n self._previous_iteration_vars = previous_iteration_vars\n self._model_dir = config.model_dir\n self._checkpoint_state = tf.train.get_checkpoint_state(self._model_dir)\n self._keep_checkpoint_max = config.keep_checkpoint_max\n\n self._update_op = None\n self._overwrite_saver = None\n self._checkpoint_overwritten = False\n\n def begin(self):\n \"\"\"Creates the savers and adds ops needed for overwriting the checkpoint.\n\n Two savers are created, a restore saver which is passed the variables from\n the previous iteration to restore, and an overwrite saver which will\n actually overwrite the checkpoint.\n \"\"\"\n\n self._restore_saver = tf_compat.v1.train.Saver(\n sharded=True, var_list=self._previous_iteration_vars)\n # Note: self._iteration_number already contains the value of the next\n # iteration since _OverwriteCheckpointHook should only execute during the\n # graph growing phase.\n self._update_op = self._iteration_number_tensor.assign(\n self._iteration_number)\n self._overwrite_saver = tf_compat.v1.train.Saver(\n sharded=True, max_to_keep=self._keep_checkpoint_max)\n self._overwrite_saver.recover_last_checkpoints(\n self._checkpoint_state.all_model_checkpoint_paths)\n\n def before_run(self, run_context):\n \"\"\"Overwrites checkpoint before any calls to session.run().\n\n This is to ensure that the values of the variables in the overwritten\n checkpoint match those in the pevious iteration checkpoint.\n\n Args:\n run_context: The tf.train.SessionRunContext passed to the hook.\n \"\"\"\n\n if not self._checkpoint_overwritten:\n session = run_context.session\n self._restore_saver.restore(session,\n self._checkpoint_state.model_checkpoint_path)\n session.run(self._update_op)\n checkpoint_path = os.path.join(self._model_dir, \"increment.ckpt\")\n logging.info(\n \"Overwriting checkpoint with new graph for iteration %d to %s-%d\",\n self._iteration_number, checkpoint_path, self._iteration_number)\n # Specify global_step=self._iteration_number to append the iteration\n # number to the checkpoint name, e.g. <model_dir>/increment.ckpt-1.\n self._overwrite_saver.save(\n session, checkpoint_path, global_step=self._iteration_number)\n self._checkpoint_overwritten = True\n\n\nclass _GraphGrowingHookDecorator(tf_compat.SessionRunHook):\n \"\"\"Decorates a SessionRunHook to only run begin() and end() methods.\"\"\"\n\n def __init__(self, hook):\n \"\"\"Initializes a _GraphGrowingHookDecorator instance.\n\n Args:\n hook: The SessionRunHook to decorate.\n \"\"\"\n self._hook = hook\n\n def begin(self):\n self._hook.begin()\n\n def end(self, session):\n self._hook.end(session)\n\n\ndef _delete_directory(directory):\n \"\"\"Removes directory and handles any folder or file exceptions.\"\"\"\n if not tf.io.gfile.exists(directory):\n return\n try:\n tf.io.gfile.rmtree(directory)\n except (tf.errors.PermissionDeniedError,\n tf.errors.FailedPreconditionError) as e:\n logging.info(\"Ignoring folder or file issues: %s '%s'\", e.error_code,\n e.message)\n\n\nclass Estimator(tf.estimator.Estimator):\n # pyformat: disable\n r\"\"\"A :class:`tf.estimator.Estimator` for training, evaluation, and serving.\n\n This implementation uses an :class:`adanet.subnetwork.Generator` as its weak\n learning algorithm for generating candidate subnetworks. These are trained in\n parallel using a single graph per iteration. At the end of each iteration, the\n estimator saves the sub-graph of the best subnetwork ensemble and its weights\n as a separate checkpoint. 
At the beginning of the next iteration, the\n estimator imports the previous iteration's frozen graph and adds ops for the\n next candidates as part of a new graph and session. This allows the estimator\n have the performance of Tensorflow's static graph constraint (minus the\n performance hit of reconstructing a graph between iterations), while having\n the flexibility of having a dynamic graph.\n\n NOTE: Subclassing :class:`tf.estimator.Estimator` is only necessary to work\n with :meth:`tf.estimator.train_and_evaluate` which asserts that the estimator\n argument is a :class:`tf.estimator.Estimator` subclass. However, all training\n is delegated to a separate :class:`tf.estimator.Estimator` instance. It is\n responsible for supporting both local and distributed training. As such, the\n :class:`adanet.Estimator` is only responsible for bookkeeping across\n iterations.\n\n Args:\n head: A :class:`tf.contrib.estimator.Head` instance for computing loss and\n evaluation metrics for every candidate.\n subnetwork_generator: The :class:`adanet.subnetwork.Generator` which defines\n the candidate subnetworks to train and evaluate at every AdaNet iteration.\n max_iteration_steps: Total number of steps for which to train candidates per\n iteration. If :class:`OutOfRange` or :class:`StopIteration` occurs in the\n middle, training stops before `max_iteration_steps` steps. When\n :code:`None`, it will train the current iteration forever.\n ensemblers: An iterable of :class:`adanet.ensemble.Ensembler` objects that\n define how to ensemble a group of subnetworks. If there are multiple,\n each should have a different `name` property.\n ensemble_strategies: An iterable of :class:`adanet.ensemble.Strategy`\n objects that define the candidate ensembles of subnetworks to explore at\n each iteration.\n evaluator: An :class:`adanet.Evaluator` for candidate selection after all\n subnetworks are done training. When :code:`None`, candidate selection uses\n a moving average of their :class:`adanet.Ensemble` AdaNet loss during\n training instead. In order to use the *AdaNet algorithm* as described in\n [Cortes et al., '17], the given :class:`adanet.Evaluator` must be created\n with the same dataset partition used during training. Otherwise, this\n framework will perform *AdaNet.HoldOut* which uses a holdout set for\n candidate selection, but does not benefit from learning guarantees.\n report_materializer: An :class:`adanet.ReportMaterializer`. Its reports are\n made available to the `subnetwork_generator` at the next iteration, so\n that it can adapt its search space. When `None`, the\n `subnetwork_generator` :meth:`generate_candidates` method will receive\n empty Lists for their `previous_ensemble_reports` and `all_reports`\n arguments.\n metric_fn: A function for adding custom evaluation metrics, which should\n obey the following signature:\n - `Args`:\n Can only have the following three arguments in any order:\n - :code:`predictions`: Predictions `Tensor` or dict of `Tensor`\n created by given :code:`head`.\n - :code:`features`: Input `dict` of `Tensor` objects created by\n :code:`input_fn` which is given to :meth:`estimator.evaluate` as an\n argument.\n - :code:`labels`: Labels `Tensor` or dict of `Tensor` (for multi-head)\n created by :code:`input_fn` which is given to\n :meth:`estimator.evaluate` as an argument.\n - `Returns`: Dict of metric results keyed by name. Final metrics are a\n union of this and :code:`head`'s existing metrics. 
If there is a name\n conflict between this and :code:`head`s existing metrics, this will\n override the existing one. The values of the dict are the results of\n calling a metric function, namely a :code:`(metric_tensor, update_op)`\n tuple.\n force_grow: Boolean override that forces the ensemble to grow by one\n subnetwork at the end of each iteration. Normally at the end of each\n iteration, AdaNet selects the best candidate ensemble according to its\n performance on the AdaNet objective. In some cases, the best ensemble is\n the `previous_ensemble` as opposed to one that includes a newly trained\n subnetwork. When `True`, the algorithm will not select the\n `previous_ensemble` as the best candidate, and will ensure that after n\n iterations the final ensemble is composed of n subnetworks.\n replicate_ensemble_in_training: Whether to rebuild the frozen subnetworks of\n the ensemble in training mode, which can change the outputs of the frozen\n subnetworks in the ensemble. When `False` and during candidate training,\n the frozen subnetworks in the ensemble are in prediction mode, so\n training-only ops like dropout are not applied to them. When `True` and\n training the candidates, the frozen subnetworks will be in training mode\n as well, so they will apply training-only ops like dropout. This argument\n is useful for regularizing learning mixture weights, or for making\n training-only side inputs available in subsequent iterations. For most\n use-cases, this should be `False`.\n adanet_loss_decay: Float decay for the exponential-moving-average of the\n AdaNet objective throughout training. This moving average is a data-\n driven way tracking the best candidate with only the training set.\n delay_secs_per_worker: Float number of seconds to delay starting the\n i-th worker. Staggering worker start-up during distributed asynchronous\n SGD can improve training stability and speed up convergence. Each worker\n will wait (i+1) * delay_secs_per_worker seconds before beginning training.\n max_worker_delay_secs: Float max number of seconds to delay starting the\n i-th worker. Staggering worker start-up during distributed asynchronous\n SGD can improve training stability and speed up convergence. Each worker\n will wait up to max_worker_delay_secs before beginning training.\n worker_wait_secs: Float number of seconds for workers to wait before\n checking if the chief prepared the next iteration.\n worker_wait_timeout_secs: Float number of seconds for workers to wait for\n chief to prepare the next iteration during distributed training. This is\n needed to prevent workers waiting indefinitely for a chief that may have\n crashed or been turned down. When the timeout is exceeded, the worker\n exits the train loop. In situations where the chief job is much slower\n than the worker jobs, this timeout should be increased.\n model_dir: Directory to save model parameters, graph and etc. This can also\n be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n report_dir: Directory where the\n :class:`adanet.subnetwork.MaterializedReport`s materialized by\n :code:`report_materializer` would be saved. If :code:`report_materializer`\n is :code:`None`, this will not save anything. 
If :code:`None` or\n empty string, defaults to :code:`<model_dir>/report`.\n config: :class:`RunConfig` object to configure the runtime settings.\n debug: Boolean to enable debug mode which will check features and labels\n for Infs and NaNs.\n enable_ensemble_summaries: Whether to record summaries to display in\n TensorBoard for each ensemble candidate. Disable to reduce memory and disk\n usage per run.\n enable_subnetwork_summaries: Whether to record summaries to display in\n TensorBoard for each subnetwork. Disable to reduce memory and disk usage\n per run.\n global_step_combiner_fn: Function for combining each subnetwork's\n iteration step into the global step.\n max_iterations: Integer maximum number of AdaNet iterations (a.k.a. rounds)\n of generating new subnetworks and ensembles, training them, and evaluating\n them against the current best ensemble. When :code:`None`, AdaNet will\n keep iterating until `Estimator#train` terminates. Otherwise, if\n :code:`max_iteratios` is supplied and is met or exceeded during training,\n training will terminate even before `steps` or `max_steps`.\n export_subnetwork_logits: Whether to include subnetwork logits in exports.\n export_subnetwork_last_layer: Whether to include subnetwork last layer in\n exports.\n replay_config: Optional :class:`adanet.replay.Config` to specify a previous\n AdaNet run to replay. Given the exact same search space but potentially\n different training data, the `replay_config` causes the estimator to\n reconstruct the previously trained model without performing a search.\n NOTE: The previous run must have executed with identical hyperparameters\n as the new run in order to be replayable. The only supported difference is\n that the underlying data can change.\n **kwargs: Extra keyword args passed to the parent.\n\n Returns:\n An :class:`adanet.Estimator` instance.\n\n Raises:\n :code:`ValueError`: If :code:`subnetwork_generator` is :code:`None`.\n :code:`ValueError`: If :code:`max_iteration_steps` is <= 0.\n :code:`ValueError`: If :code:`model_dir` is not specified during distributed\n training.\n :code:`ValueError`: If :code:`max_iterations` is <= 0.\n \"\"\"\n # pyformat: enable\n\n class _Keys(object):\n CURRENT_ITERATION = \"current_iteration\"\n SUBNETWORK_GENERATOR = \"subnetwork_generator\"\n\n def __init__(self,\n head,\n subnetwork_generator,\n max_iteration_steps,\n ensemblers=None,\n ensemble_strategies=None,\n evaluator=None,\n report_materializer=None,\n metric_fn=None,\n force_grow=False,\n replicate_ensemble_in_training=False,\n adanet_loss_decay=.9,\n delay_secs_per_worker=5,\n max_worker_delay_secs=60,\n worker_wait_secs=5,\n worker_wait_timeout_secs=7200,\n model_dir=None,\n report_dir=None,\n config=None,\n debug=False,\n enable_ensemble_summaries=True,\n enable_subnetwork_summaries=True,\n global_step_combiner_fn=tf.math.reduce_mean,\n max_iterations=None,\n export_subnetwork_logits=False,\n export_subnetwork_last_layer=True,\n replay_config=None,\n **kwargs):\n if subnetwork_generator is None:\n raise ValueError(\"subnetwork_generator can't be None.\")\n if max_iteration_steps is not None and max_iteration_steps <= 0.:\n raise ValueError(\"max_iteration_steps must be > 0 or None.\")\n if max_iterations is not None and max_iterations <= 0.:\n raise ValueError(\"max_iterations must be > 0 or None.\")\n is_distributed_training = config and config.num_worker_replicas > 1\n is_model_dir_specified = model_dir or (config and config.model_dir)\n if is_distributed_training and not is_model_dir_specified:\n # A 
common model dir for the chief and workers is required for\n # coordination during distributed training.\n raise ValueError(\n \"For distributed training, a model_dir must be specified.\")\n\n self._subnetwork_generator = subnetwork_generator\n\n # Overwrite superclass's assert that members are not overwritten in order\n # to overwrite public methods. Note that we are doing something that is not\n # explicitly supported by the Estimator API and may break in the future.\n tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod( # pylint: disable=protected-access\n lambda _: None)\n\n self._evaluator = evaluator\n self._report_materializer = report_materializer\n\n self._force_grow = force_grow\n self._delay_secs_per_worker = delay_secs_per_worker\n self._max_worker_delay_secs = max_worker_delay_secs\n self._worker_wait_secs = worker_wait_secs\n self._worker_wait_timeout_secs = worker_wait_timeout_secs\n self._max_iterations = max_iterations\n self._replay_config = replay_config\n\n # Added for backwards compatibility.\n default_ensembler_args = [\n \"mixture_weight_type\", \"mixture_weight_initializer\",\n \"warm_start_mixture_weights\", \"adanet_lambda\", \"adanet_beta\", \"use_bias\"\n ]\n default_ensembler_kwargs = {\n k: v for k, v in kwargs.items() if k in default_ensembler_args\n }\n if default_ensembler_kwargs:\n logging.warning(\n \"The following arguments have been moved to \"\n \"`adanet.ensemble.ComplexityRegularizedEnsembler` which can be \"\n \"specified in the `ensemblers` argument: %s\",\n sorted(default_ensembler_kwargs.keys()))\n for key in default_ensembler_kwargs:\n del kwargs[key]\n\n # Experimental feature.\n placement_strategy_arg = \"experimental_placement_strategy\"\n placement_strategy = kwargs.pop(placement_strategy_arg, None)\n if placement_strategy:\n logging.warning(\n \"%s is an experimental feature. 
Its behavior is not guaranteed \"\n \"to be backwards compatible.\", placement_strategy_arg)\n\n # Monkey patch the default variable placement strategy that Estimator uses\n # since it does not support workers having different graphs from the chief.\n # TODO: Consider using `RunConfig.replace` with the new device_fn,\n # but this can cause issues since RunConfig automatically parses TF_CONFIG\n # environment variable.\n with monkey_patch_default_variable_placement_strategy():\n # This `Estimator` is responsible for bookkeeping across iterations, and\n # for training the subnetworks in both a local and distributed setting.\n # Subclassing improves future-proofing against new private methods being\n # added to `tf.estimator.Estimator` that are expected to be callable by\n # external functions, such as in b/110435640.\n super(Estimator, self).__init__(\n model_fn=self._adanet_model_fn,\n params={},\n config=config,\n model_dir=model_dir,\n **kwargs)\n\n if default_ensembler_kwargs and ensemblers:\n raise ValueError(\"When specifying the `ensemblers` argument, \"\n \"the following arguments must not be given: {}\".format(\n default_ensembler_kwargs.keys()))\n if not ensemblers:\n default_ensembler_kwargs[\"model_dir\"] = self.model_dir\n ensemblers = [ComplexityRegularizedEnsembler(**default_ensembler_kwargs)]\n\n # These are defined after base Estimator's init so that they can\n # use the same temporary model_dir as the underlying Estimator even if\n # model_dir is not provided.\n self._use_tpu = kwargs.get(\"use_tpu\", False)\n ensemble_builder = _EnsembleBuilder(\n head=head,\n metric_fn=metric_fn,\n use_tpu=self._use_tpu,\n export_subnetwork_logits=export_subnetwork_logits,\n export_subnetwork_last_layer=export_subnetwork_last_layer)\n\n # TODO: Merge CandidateBuilder into SubnetworkManager.\n candidate_builder = _CandidateBuilder(adanet_loss_decay=adanet_loss_decay)\n subnetwork_manager = _SubnetworkManager(\n head=head, metric_fn=metric_fn, use_tpu=self._use_tpu)\n if not placement_strategy:\n placement_strategy = ReplicationStrategy()\n self._iteration_builder = _IterationBuilder(\n candidate_builder,\n subnetwork_manager,\n ensemble_builder,\n ensemblers,\n max_iteration_steps,\n self._summary_maker,\n global_step_combiner_fn,\n placement_strategy,\n replicate_ensemble_in_training,\n use_tpu=self._use_tpu,\n debug=debug,\n enable_ensemble_summaries=enable_ensemble_summaries,\n enable_subnetwork_summaries=enable_subnetwork_summaries)\n self._ensemble_strategies = ensemble_strategies or [GrowStrategy()]\n\n report_dir = report_dir or os.path.join(self._model_dir, \"report\")\n self._report_accessor = _ReportAccessor(report_dir)\n\n def _summary_maker(self, scope=None, skip_summary=False, namespace=None):\n \"\"\"Constructs a `_ScopedSummary`.\"\"\"\n if self._use_tpu:\n return _TPUScopedSummary(\n logdir=self._model_dir,\n scope=scope,\n skip_summary=skip_summary,\n namespace=namespace)\n else:\n return _ScopedSummary(\n scope=scope, skip_summary=skip_summary, namespace=namespace)\n\n def _latest_checkpoint_iteration_number(self):\n \"\"\"Returns the iteration number from the latest checkpoint.\"\"\"\n\n latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)\n if latest_checkpoint is None:\n return 0\n return tf.train.load_variable(latest_checkpoint,\n self._Keys.CURRENT_ITERATION)\n\n def _checkpoint_path_iteration_number(self, checkpoint_path):\n \"\"\"Returns the iteration number from checkpoint_path.\"\"\"\n\n return tf.train.load_variable(checkpoint_path, 
self._Keys.CURRENT_ITERATION)\n\n def _latest_checkpoint_global_step(self):\n \"\"\"Returns the global step from the latest checkpoint.\"\"\"\n\n latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)\n if latest_checkpoint is None:\n return 0\n return tf.train.load_variable(latest_checkpoint,\n tf_compat.v1.GraphKeys.GLOBAL_STEP)\n\n def train(self,\n input_fn,\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None):\n # pyformat: disable\n \"\"\"Trains a model given training data :code:`input_fn`.\n\n NOTE: If a given input_fn raises an :code:`OutOfRangeError`, then *all* of\n training will exit. The best practice is to make the training dataset repeat\n forever, in order to perform model search for more than one iteration.\n\n Args:\n input_fn: A function that provides input data for training as minibatches.\n See [Premade Estimators](\n https://tensorflow.org/guide/premade_estimators#create_input_functions)\n for more information. The function should construct and return one of\n the following:\n * A :code:`tf.data.Dataset` object: Outputs of `Dataset` object must\n be a tuple `(features, labels)` with same constraints as below.\n * A tuple `(features, labels)`: Where `features` is a\n :code:`tf.Tensor` or a dictionary of string feature name to\n `Tensor` and `labels` is a :code:`Tensor` or a dictionary of string\n label name to `Tensor`. Both `features` and `labels` are consumed by\n `model_fn`. They should satisfy the expectation of `model_fn` from\n inputs.\n hooks: List of :code:`tf.train.SessionRunHook` subclass instances. Used\n for callbacks inside the training loop.\n steps: Number of steps for which to train the model. If :code:`None`,\n train forever or train until `input_fn` generates the\n :code:`tf.errors.OutOfRange` error or :code:`StopIteration` exception.\n `steps` works incrementally. If you call two times `train(steps=10)`\n then training occurs in total 20 steps. If :code:`OutOfRange` or\n :code:`StopIteration` occurs in the middle, training stops before 20\n steps. If you don't want to have incremental behavior please set\n `max_steps` instead. If set, `max_steps` must be :code:`None`.\n max_steps: Number of total steps for which to train model. If\n :code:`None`, train forever or train until `input_fn` generates the\n :code:`tf.errors.OutOfRange` error or :code:`StopIteration` exception.\n If set, `steps` must be `None`. If :code:`OutOfRange` or\n :code:`StopIteration` occurs in the middle, training stops before\n `max_steps` steps. Two calls to `train(steps=100)` means 200 training\n iterations. On the other hand, two calls to `train(max_steps=100)`\n means that the second call will not do any iteration since first call\n did all 100 steps.\n saving_listeners: list of :code:`CheckpointSaverListener` objects. Used\n for callbacks that run immediately before or after checkpoint savings.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If both `steps` and `max_steps` are not `None`.\n ValueError: If either `steps` or `max_steps <= 0`.\n \"\"\"\n # pyformat: enable\n\n if (steps is not None) and (max_steps is not None):\n raise ValueError(\"Can not provide both steps and max_steps.\")\n if steps is not None and steps <= 0:\n raise ValueError(\"Must specify steps > 0, given: {}\".format(steps))\n\n latest_global_steps = self._latest_checkpoint_global_step()\n if steps is not None:\n max_steps = latest_global_steps + steps\n\n # Each iteration of this AdaNet loop represents an `_Iteration`. 
The\n # current iteration number is stored as a variable in the checkpoint so\n # that training can be stopped and started at anytime.\n with monkey_patch_default_variable_placement_strategy():\n while True:\n latest_global_steps = self._latest_checkpoint_global_step()\n current_iteration = self._latest_checkpoint_iteration_number()\n logging.info(\"Beginning training AdaNet iteration %s\",\n current_iteration)\n self._iteration_ended = False\n\n # Delegate training to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(\n self.config, params={\n \"is_inside_training_loop\": True,\n })\n result = temp_estimator.train(\n input_fn=input_fn,\n hooks=hooks,\n max_steps=max_steps,\n saving_listeners=saving_listeners)\n # In TensorFlow v1.14.0 and below, saving listeners are attached to the\n # first CheckpointSaverHook each time train is called. Instead, we pass\n # in the saving_listeners in the first AdaNet iteration only.\n if not tf_compat.version_greater_or_equal(\"1.15.0\"):\n saving_listeners = None\n logging.info(\"Finished training Adanet iteration %s\", current_iteration)\n\n # If training ended because the maximum number of training steps\n # occurred, exit training.\n global_steps = self._latest_checkpoint_global_step()\n if max_steps is not None and global_steps >= max_steps:\n logging.info(\"Training ended after %s global steps\", global_steps)\n return result\n\n # If training ended for any reason other than the iteration ending,\n # exit training.\n if not self._iteration_ended:\n logging.info(\"Training stop requested\")\n return result\n\n max_iterations = self._max_iterations\n if max_iterations and current_iteration + 1 >= max_iterations:\n logging.info(\n \"Training ended after exceeding maximum AdaNet iterations\")\n if steps is not None and global_steps - latest_global_steps < steps:\n logging.warning(\n \"Both `max_iterations` and `steps` were specified, but \"\n \"`max_iterations` takes precedence over `steps`\")\n return result\n\n logging.info(\"Beginning bookkeeping phase for iteration %s\",\n current_iteration)\n\n # The chief prepares the next AdaNet iteration, and increments the\n # iteration number by 1.\n if self.config.is_chief:\n with self._force_replication_strategy():\n self._execute_bookkeeping_phase(\n input_fn, current_iteration, train_hooks=hooks or [])\n\n # This inner loop serves mainly for synchronizing the workers with the\n # chief during distributed training. Workers that finish training early\n # wait for the chief to prepare the next iteration and increment the\n # iteration number. Workers that are slow to finish training quickly\n # move onto the next iteration. And workers that go offline and return\n # online after training ended terminate gracefully.\n wait_for_chief = not self.config.is_chief\n timer = _CountDownTimer(self._worker_wait_timeout_secs)\n while wait_for_chief:\n # If the chief hits max_steps, it will stop training itself and not\n # increment the iteration number, so this is how the worker knows to\n # exit if it wakes up and the chief is gone.\n # TODO: Support steps parameter.\n if self._latest_checkpoint_global_step() >= max_steps:\n return result\n\n # In distributed training, a worker may end training before the chief\n # overwrites the checkpoint with the incremented iteration number. If\n # that is the case, it should wait for the chief to do so. 
Otherwise\n # the worker will get stuck waiting for its weights to be initialized.\n next_iteration = self._latest_checkpoint_iteration_number()\n if next_iteration > current_iteration:\n break\n\n # Check timeout when waiting for potentially downed chief.\n if timer.secs_remaining() == 0:\n logging.error(\n \"Chief job did not prepare next iteration after %s secs. It \"\n \"may have been preempted, been turned down, or crashed. This \"\n \"worker is now exiting training.\",\n self._worker_wait_timeout_secs)\n return result\n logging.info(\"Waiting for chief to finish\")\n time.sleep(self._worker_wait_secs)\n\n # Stagger starting workers to prevent training instability.\n # Mimics behavior of tf.estimator.train_and_evaluate.\n if not self.config.is_chief and self.config.task_type == \"worker\":\n task_id = self.config.task_id or 0\n # Stagger each worker up to 60 secs.\n delay_secs = min(self._max_worker_delay_secs,\n (task_id + 1.) * self._delay_secs_per_worker)\n if delay_secs > 0.:\n logging.info(\"Waiting %d secs before continuing training.\",\n delay_secs)\n time.sleep(delay_secs)\n\n logging.info(\"Finished bookkeeping phase for iteration %s\",\n current_iteration)\n\n def evaluate(self,\n input_fn,\n steps=None,\n hooks=None,\n checkpoint_path=None,\n name=None):\n if not checkpoint_path:\n checkpoint_path = tf.train.latest_checkpoint(self.model_dir)\n\n # Ensure that the read to get the iteration number and read to restore\n # variable values come from the same checkpoint during evaluation.\n params = {\n \"evaluation_checkpoint_path\":\n checkpoint_path,\n \"evaluation_name\":\n name,\n \"best_ensemble_index\":\n self._compute_best_ensemble_index(checkpoint_path),\n }\n\n # Delegate evaluation to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(self.config, params=params)\n result = temp_estimator.evaluate(\n input_fn,\n steps=steps,\n hooks=hooks,\n checkpoint_path=checkpoint_path,\n name=name)\n return result\n\n def predict(self,\n input_fn,\n predict_keys=None,\n hooks=None,\n checkpoint_path=None,\n yield_single_examples=True):\n # Delegate predicting to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(\n self.config,\n params={\n \"best_ensemble_index\":\n self._compute_best_ensemble_index(checkpoint_path),\n })\n return temp_estimator.predict(input_fn, predict_keys, hooks,\n checkpoint_path, yield_single_examples)\n\n @deprecation.deprecated(\n None, \"This function has been renamed, use `export_saved_model` instead.\")\n def export_savedmodel(self,\n export_dir_base,\n serving_input_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False):\n # Delegate exporting to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(\n self.config,\n params={\n \"best_ensemble_index\":\n self._compute_best_ensemble_index(checkpoint_path),\n })\n with self._force_replication_strategy():\n return temp_estimator.export_savedmodel(\n export_dir_base=export_dir_base,\n serving_input_receiver_fn=serving_input_receiver_fn,\n assets_extra=assets_extra,\n as_text=as_text,\n checkpoint_path=checkpoint_path,\n strip_default_attrs=strip_default_attrs)\n\n def export_saved_model(self,\n export_dir_base,\n serving_input_receiver_fn,\n assets_extra=None,\n as_text=False,\n 
checkpoint_path=None,\n experimental_mode=tf.estimator.ModeKeys.PREDICT):\n # Delegate exporting to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(\n self.config,\n params={\n \"best_ensemble_index\":\n self._compute_best_ensemble_index(checkpoint_path),\n })\n with self._force_replication_strategy():\n return temp_estimator.export_saved_model(\n export_dir_base=export_dir_base,\n serving_input_receiver_fn=serving_input_receiver_fn,\n assets_extra=assets_extra,\n as_text=as_text,\n checkpoint_path=checkpoint_path,\n experimental_mode=experimental_mode)\n\n def experimental_export_all_saved_models(self,\n export_dir_base,\n input_receiver_fn_map,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None):\n # Delegate exporting to a temporary estimator instead of super to make\n # passing arguments more functional (via params).\n temp_estimator = self._create_temp_estimator(\n self.config,\n params={\n \"best_ensemble_index\":\n self._compute_best_ensemble_index(checkpoint_path),\n })\n with self._force_replication_strategy():\n return temp_estimator.experimental_export_all_saved_models(\n export_dir_base=export_dir_base,\n input_receiver_fn_map=input_receiver_fn_map,\n assets_extra=assets_extra,\n as_text=as_text,\n checkpoint_path=checkpoint_path)\n\n def _compute_best_ensemble_index(self, checkpoint_path):\n \"\"\"Runs the Evaluator to obtain the best ensemble index among candidates.\"\"\"\n\n # AdaNet Replay.\n if self._replay_config:\n iteration_number = (\n self._checkpoint_path_iteration_number(checkpoint_path)\n if checkpoint_path else self._latest_checkpoint_iteration_number())\n best_index = self._replay_config.get_best_ensemble_index(iteration_number)\n if best_index is not None:\n return best_index\n\n if self._evaluator:\n return self._execute_candidate_evaluation_phase(\n self._evaluator.input_fn,\n export_best_architecture=False,\n checkpoint_path=checkpoint_path)\n return None\n\n @contextlib.contextmanager\n def _force_replication_strategy(self):\n \"\"\"Sets placement_strategy to always be ReplicationStrategy.\n\n This is useful during the bookkeeping phase and when Estimator's export\n saved model functions are called. In both of these cases, local tf.Sessions\n are created which do not have access to the cluster. Therefore,\n RoundRobinReplicationStrategy will fail when it tries to place ops on\n cluster devices which the local tf.Sessions cannot access.\n\n Yields:\n Nothing. Simply returns control back to the caller.\n \"\"\"\n\n temp_placement_strategy = self._iteration_builder.placement_strategy\n try:\n self._iteration_builder.placement_strategy = ReplicationStrategy()\n yield\n finally:\n self._iteration_builder.placement_strategy = temp_placement_strategy\n\n @contextlib.contextmanager\n def _call_input_fn_in_new_graph(self, input_fn, mode, config):\n \"\"\"Calls the given input_fn and yields results within a new graph context.\n\n Yields features, labels, and hooks from the result of an Estimator input_fn.\n\n Args:\n input_fn: a function that takes no arguments and returns one of:\n * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a\n tuple (features, labels) with same constraints as below.\n * A tuple (features, labels): Where `features` is a `Tensor` or a\n dictionary of string feature name to `Tensor` and `labels` is a\n `Tensor` or a dictionary of string label name to `Tensor`. Both\n `features` and `labels` are consumed by `model_fn`. 
They should\n satisfy the expectation of `model_fn` from inputs.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n config: The current `tf.estimator.RunConfig`.\n\n Yields:\n Tuple of features, labels, and input_hooks, where features are as\n described above, labels are as described above or None, and input_hooks\n are a list of SessionRunHooks to be included when running.\n\n Raises:\n ValueError: if the result is a list or tuple of length != 2.\n \"\"\"\n\n with tf.Graph().as_default() as g:\n tf_compat.v1.set_random_seed(config.tf_random_seed)\n # Create global step before calling model_fn as does superclass.\n self._create_and_assert_global_step(g)\n with tf.device(\"/cpu:0\"):\n input_fn_outs = input_fn()\n yield util.parse_input_fn_result(input_fn_outs)\n\n def _create_temp_run_config(self, temp_model_dir):\n \"\"\"Creates a temp `RunConfig` for the bookkeeping phase.\"\"\"\n\n config = self.config\n return tf.estimator.RunConfig(\n model_dir=temp_model_dir,\n tf_random_seed=config.tf_random_seed,\n session_config=config.session_config,\n protocol=config.protocol)\n\n def _create_temp_estimator(self, config, params):\n \"\"\"Creates a temp `Estimator` to grow the graph for the next iteration.\"\"\"\n\n return tf.estimator.Estimator(\n model_fn=self._adanet_model_fn, config=config, params=params)\n\n def _execute_bookkeeping_phase(self, train_input_fn, iteration_number,\n train_hooks):\n \"\"\"Run the AdaNet bookkeeping phase to prepare the next iteration.\n\n This method creates a TensorFlow graph up to three times:\n 1. To evaluate all candidate ensembles to find the best one.\n 2. To materialize reports and store them to disk (if report_materializer\n exists).\n 3. To grow the TensorFlow graph and overwrite the model directory's\n checkpoint with the next iteration's ops.\n\n Args:\n train_input_fn: The input_fn used during training.\n iteration_number: Integer current iteration number.\n train_hooks: List of `SessionRunHook` passed for training.\n \"\"\"\n\n next_iteration_number = iteration_number + 1\n logging.info(\"Preparing iteration %s:\", next_iteration_number)\n\n if self._evaluator:\n evaluator_input_fn = self._evaluator.input_fn\n else:\n evaluator_input_fn = train_input_fn\n best_ensemble_index = self._execute_candidate_evaluation_phase(\n evaluator_input_fn, export_best_architecture=True)\n self._execute_report_materialization_phase(best_ensemble_index)\n self._execute_graph_growing_phase(train_input_fn, train_hooks)\n\n logging.info(\"Finished preparing iteration %s.\", next_iteration_number)\n\n def _execute_candidate_evaluation_phase(self,\n evaluator_input_fn,\n export_best_architecture,\n checkpoint_path=None):\n \"\"\"Evaluates and chooses the best ensemble for this iteration.\n\n Args:\n evaluator_input_fn: The input_fn for evaluation.\n export_best_architecture: Boolean whether to persist the best ensemble's\n architecture to the model_dir.\n checkpoint_path: Optional path to the checkpoint to restore from. 
If None,\n effectively assumes the latest checkpoint path.\n\n Returns:\n Integer index of the best ensemble withing the list of candidate ensembles\n for the current iteration.\n \"\"\"\n\n logging.info(\"Evaluating candidates...\")\n config = self.config\n mode = tf.estimator.ModeKeys.EVAL\n with self._call_input_fn_in_new_graph(evaluator_input_fn, mode,\n config) as (features, labels,\n input_hooks):\n current_iteration, _ = self._create_iteration(\n features, labels, mode, config, is_growing_phase=False)\n best_ensemble_index = self._get_best_ensemble_index(\n current_iteration, input_hooks)\n architecture = current_iteration.candidates[\n best_ensemble_index].ensemble_spec.architecture\n if export_best_architecture:\n iteration_number = (\n self._checkpoint_path_iteration_number(checkpoint_path)\n if checkpoint_path else self._latest_checkpoint_iteration_number())\n new_architecture_filename = self._architecture_filename(iteration_number)\n logging.info(\"Exporting best ensemble architecture to %s\",\n new_architecture_filename)\n self._save_architecture(new_architecture_filename, architecture)\n logging.info(\"Done evaluating candidates.\")\n\n return best_ensemble_index\n\n def _execute_report_materialization_phase(self, best_ensemble_index):\n \"\"\"Materializes and store subnetwork reports.\"\"\"\n\n if not self._report_materializer:\n return\n\n logging.info(\"Materializing reports...\")\n input_fn = self._report_materializer.input_fn\n mode = tf.estimator.ModeKeys.EVAL\n config = self.config\n with self._call_input_fn_in_new_graph(input_fn, mode,\n config) as (features, labels,\n input_hooks):\n current_iteration, _ = self._create_iteration(\n features, labels, mode, config, is_growing_phase=False)\n self._materialize_report(current_iteration, input_hooks,\n best_ensemble_index)\n logging.info(\"Done materializing reports.\")\n\n def _execute_graph_growing_phase(self, train_input_fn, train_hooks):\n \"\"\"Grows the tensorflow graph for the next iteration.\n\n Normally the MonitoredTrainingSession does not allow one to add new ops to\n the TensorFlow graph once training starts. To get around this limitation,\n create the graph for the next iteration and overwrite the model directory\n checkpoint with the expanded graph.\n\n Args:\n train_input_fn: The input_fn used during training.\n train_hooks: List of `SessionRunHook` passed for training.\n \"\"\"\n\n logging.info(\"Adapting graph and incrementing iteration number...\")\n config = self.config\n temp_model_dir = os.path.join(self.model_dir, \"temp_model_dir\")\n if not tf.io.gfile.exists(temp_model_dir):\n tf.io.gfile.makedirs(temp_model_dir)\n # Since deleting a model_dir can fail, we need each temporary directory to\n # be unique. 
So we use the UTC time when creating it.\n time_in_millis = int(time.time() * 1000)\n temp_model_sub_dir = os.path.join(temp_model_dir, str(time_in_millis))\n temp_run_config = config.replace(model_dir=temp_model_sub_dir)\n temp_estimator = self._create_temp_estimator(\n config=temp_run_config,\n params={\n \"is_growing_phase\": True,\n \"is_inside_training_loop\": True,\n })\n # Do not train with any saving_listeners since this is just a temporary\n # estimator.\n temp_estimator.train(\n input_fn=train_input_fn,\n max_steps=1,\n hooks=self._process_hooks_for_growing_phase(train_hooks),\n saving_listeners=None)\n\n _delete_directory(temp_model_dir)\n logging.info(\"Done adapting graph and incrementing iteration number.\")\n\n def _architecture_filename(self, iteration_number):\n \"\"\"Returns the filename of the given iteration's frozen graph.\"\"\"\n\n frozen_checkpoint = os.path.join(self.model_dir, \"architecture\")\n return \"{}-{}.json\".format(frozen_checkpoint, iteration_number)\n\n def _get_best_ensemble_index(self, current_iteration, input_hooks):\n \"\"\"Returns the best candidate ensemble's index in this iteration.\n\n Evaluates the ensembles using an `Evaluator` when provided. Otherwise,\n it returns the index of the best candidate as defined by the `_Iteration`.\n\n Args:\n current_iteration: Current `_Iteration`.\n input_hooks: List of SessionRunHooks to be included when running.\n\n Returns:\n Index of the best ensemble in the iteration's list of `_Candidates`.\n \"\"\"\n # AdaNet Replay.\n if self._replay_config:\n best_index = self._replay_config.get_best_ensemble_index(\n current_iteration.number)\n if best_index is not None:\n return best_index\n\n # Skip the evaluation phase when there is only one candidate subnetwork.\n if len(current_iteration.candidates) == 1:\n logging.info(\"'%s' is the only ensemble\",\n current_iteration.candidates[0].ensemble_spec.name)\n return 0\n\n # The zero-th index candidate at iteration t>0 is always the\n # previous_ensemble.\n if current_iteration.number > 0 and self._force_grow and (len(\n current_iteration.candidates) == 2):\n logging.info(\"With `force_grow` enabled, '%s' is the only ensemble\",\n current_iteration.candidates[1].ensemble_spec.name)\n return 1\n\n latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)\n logging.info(\"Starting ensemble evaluation for iteration %s\",\n current_iteration.number)\n for hook in input_hooks:\n hook.begin()\n with tf_compat.v1.Session() as sess:\n init = tf.group(tf_compat.v1.global_variables_initializer(),\n tf_compat.v1.local_variables_initializer(),\n tf_compat.v1.tables_initializer())\n sess.run(init)\n coord = tf.train.Coordinator()\n for hook in input_hooks:\n hook.after_create_session(sess, coord)\n saver = tf_compat.v1.train.Saver(sharded=True)\n saver.restore(sess, latest_checkpoint)\n\n tf_compat.v1.train.start_queue_runners(sess=sess, coord=coord)\n ensemble_metrics = []\n for candidate in current_iteration.candidates:\n metrics = call_eval_metrics(candidate.ensemble_spec.eval_metrics)\n metrics[\"adanet_loss\"] = tf_compat.v1.metrics.mean(\n candidate.ensemble_spec.adanet_loss)\n ensemble_metrics.append(metrics)\n if self._evaluator:\n metric_name = self._evaluator.metric_name\n metrics = self._evaluator.evaluate(sess, ensemble_metrics)\n objective_fn = self._evaluator.objective_fn\n else:\n metric_name = \"adanet_loss\"\n metrics = sess.run(\n [c.adanet_loss for c in current_iteration.candidates])\n objective_fn = np.nanargmin\n\n values = []\n for i in 
range(len(current_iteration.candidates)):\n ensemble_name = current_iteration.candidates[i].ensemble_spec.name\n values.append(\"{}/{} = {:.6f}\".format(metric_name, ensemble_name,\n metrics[i]))\n logging.info(\"Computed ensemble metrics: %s\", \", \".join(values))\n if self._force_grow and current_iteration.number > 0:\n logging.info(\n \"The `force_grow` override is enabled, so the \"\n \"the performance of the previous ensemble will be ignored.\")\n # NOTE: The zero-th index candidate at iteration t>0 is always the\n # previous_ensemble.\n metrics = metrics[1:]\n index = objective_fn(metrics) + 1\n else:\n index = objective_fn(metrics)\n logging.info(\"Finished ensemble evaluation for iteration %s\",\n current_iteration.number)\n logging.info(\"'%s' at index %s is the best ensemble\",\n current_iteration.candidates[index].ensemble_spec.name, index)\n return index\n\n def _materialize_report(self, current_iteration, input_hooks,\n best_ensemble_index):\n \"\"\"Generates reports as defined by `Builder`s.\n\n Materializes the Tensors and metrics defined in the `Builder`s'\n `build_subnetwork_report` method using `ReportMaterializer`, and stores\n them to disk using `_ReportAccessor`.\n\n Args:\n current_iteration: Current `_Iteration`.\n input_hooks: List of SessionRunHooks to be included when running.\n best_ensemble_index: Integer index of the best candidate ensemble.\n \"\"\"\n\n latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)\n logging.info(\"Starting metric logging for iteration %s\",\n current_iteration.number)\n\n best_candidate = current_iteration.candidates[best_ensemble_index]\n best_architecture = best_candidate.ensemble_spec.architecture\n included_subnetwork_names = [\n name for i, name in best_architecture.subnetworks\n if i == current_iteration.number\n ]\n for hook in input_hooks:\n hook.begin()\n with tf_compat.v1.Session() as sess:\n init = tf.group(tf_compat.v1.global_variables_initializer(),\n tf_compat.v1.local_variables_initializer(),\n tf_compat.v1.tables_initializer())\n sess.run(init)\n coord = tf.train.Coordinator()\n for hook in input_hooks:\n hook.after_create_session(sess, coord)\n saver = tf_compat.v1.train.Saver(sharded=True)\n saver.restore(sess, latest_checkpoint)\n tf_compat.v1.train.start_queue_runners(sess=sess, coord=coord)\n materialized_reports = (\n self._report_materializer.materialize_subnetwork_reports(\n sess, current_iteration.number,\n current_iteration.subnetwork_reports, included_subnetwork_names))\n self._report_accessor.write_iteration_report(current_iteration.number,\n materialized_reports)\n\n logging.info(\"Finished saving subnetwork reports for iteration %s\",\n current_iteration.number)\n\n def _process_hooks_for_growing_phase(self, hooks):\n \"\"\"Processes hooks which will run during the graph growing phase.\n\n In particular the following things are done:\n - CheckpointSaverHooks are filtered out since they are not intended to\n run between training runs and will cause errors. 
We also reset the\n CheckpointSaverHooks' Saver between iterations, see b/122795064 for more\n details.\n - Decorate the remaining hooks with _GraphGrowingHookDecorator to only run\n the begin() and end() methods during the graph growing phase.\n\n Args:\n hooks: The list of `SessionRunHooks` to process.\n\n Returns:\n The processed hooks which should run during the growing phase.\n \"\"\"\n\n processed_hooks = []\n for hook in hooks:\n # Reset CheckpointSaverHooks' Saver and filter out.\n if isinstance(hook, tf_compat.CheckpointSaverHook):\n hook._saver = None # pylint: disable=protected-access\n continue\n # Do not decorate the _OverwriteCheckpointHook since it should always\n # run during the graph growing phase.\n if not isinstance(hook, _OverwriteCheckpointHook):\n hook = _GraphGrowingHookDecorator(hook)\n processed_hooks.append(hook)\n return processed_hooks\n\n def _training_chief_hooks(self, current_iteration, training):\n \"\"\"Returns chief-only training hooks to be run this iteration.\n\n Args:\n current_iteration: Current `_Iteration`.\n training: Whether in training mode.\n\n Returns:\n A list of `SessionRunHook` instances.\n \"\"\"\n\n if not training:\n return []\n\n training_hooks = []\n for summary in current_iteration.summaries:\n output_dir = self.model_dir\n if summary.scope:\n output_dir = os.path.join(output_dir, summary.namespace, summary.scope)\n summary_saver_hook = tf_compat.SummarySaverHook(\n save_steps=self.config.save_summary_steps,\n output_dir=output_dir,\n summary_op=summary.merge_all())\n training_hooks.append(summary_saver_hook)\n training_hooks += list(\n current_iteration.estimator_spec.training_chief_hooks)\n return training_hooks\n\n def _training_hooks(self, current_iteration, training,\n iteration_number_tensor, previous_iteration_vars,\n is_growing_phase):\n \"\"\"Returns training hooks to be run on all workers and chief this iteration.\n\n Args:\n current_iteration: Current `_Iteration`.\n training: Whether in training mode.\n iteration_number_tensor: An int tensor of the current AdaNet iteraiton.\n previous_iteration_vars: The variables of the previous iteration to be\n restored by the _OverwriteCheckpointHook. 
If empty, no\n _OverwriteCheckpointHook will be created.\n is_growing_phase: Whether we are in the AdaNet graph growing phase.\n\n Returns:\n A list of `SessionRunHook` instances.\n \"\"\"\n\n if not training:\n return []\n\n def after_fn():\n self._iteration_ended = True\n\n training_hooks = list(current_iteration.estimator_spec.training_hooks) + [\n _StopAfterTrainingHook(current_iteration, after_fn=after_fn)\n ]\n\n if is_growing_phase:\n training_hooks.append(\n _OverwriteCheckpointHook(current_iteration, iteration_number_tensor,\n previous_iteration_vars, self.config))\n return training_hooks\n\n def _evaluation_hooks(self, current_iteration, training, evaluation_name):\n \"\"\"Returns evaluation hooks for this iteration.\n\n Args:\n current_iteration: Current `_Iteration`.\n training: Whether in training mode.\n evaluation_name: String name to append to the eval directory.\n\n Returns:\n A list of `SessionRunHook` instances.\n \"\"\"\n\n if training:\n return []\n evaluation_hooks = []\n for subnetwork_spec in current_iteration.subnetwork_specs:\n evaluation_hooks.append(\n self._create_eval_metric_saver_hook(\n subnetwork_spec.eval_metrics,\n subnetwork_spec.name,\n kind=\"subnetwork\",\n evaluation_name=evaluation_name))\n for candidate in current_iteration.candidates:\n evaluation_hooks.append(\n self._create_eval_metric_saver_hook(\n candidate.ensemble_spec.eval_metrics,\n candidate.ensemble_spec.name,\n kind=\"ensemble\",\n evaluation_name=evaluation_name))\n return evaluation_hooks\n\n def _create_eval_metric_saver_hook(self, eval_metrics, name, kind,\n evaluation_name):\n eval_subdir = \"eval\"\n if evaluation_name:\n eval_subdir = \"eval_{}\".format(evaluation_name)\n return _EvalMetricSaverHook(\n name=name,\n kind=kind,\n eval_metrics=eval_metrics,\n output_dir=os.path.join(self.model_dir, kind, name, eval_subdir))\n\n def _save_architecture(self, filename, architecture):\n \"\"\"Persists the ensemble's architecture in a serialized format.\n\n Writes to a text file with one subnetwork's iteration number and name\n per line.\n\n Args:\n filename: String filename to persist the ensemble architecture.\n architecture: Target `_Architecture` instance.\n \"\"\"\n\n # Make directories since model_dir may not have been created yet.\n tf.io.gfile.makedirs(os.path.dirname(filename))\n with tf.io.gfile.GFile(filename, \"w\") as record_file:\n record_file.write(\n architecture.serialize(self._latest_checkpoint_global_step().item()))\n\n def _read_architecture(self, filename):\n \"\"\"Reads an ensemble architecture from disk.\n\n Assumes the file was written with `_save_architecture`.\n\n Args:\n filename: String filename where features were recorded.\n\n Returns:\n An `_Architecture` instance.\n\n Raises:\n OSError: When file not found at `filename`.\n \"\"\"\n\n if not tf.io.gfile.exists(filename):\n raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), filename)\n\n with tf.io.gfile.GFile(filename, \"rb\") as gfile:\n return _Architecture.deserialize(gfile.read().decode())\n\n def _find_ensemble_candidate(self, ensemble_candidate_name,\n ensemble_candidates):\n \"\"\"Returns the ensemble candidate with the given name.\"\"\"\n\n for ensemble_candidate in ensemble_candidates:\n if ensemble_candidate.name == ensemble_candidate_name:\n return ensemble_candidate\n raise ValueError(\n \"Could not find a matching ensemble candidate with name '{}'. 
\"\n \"Are you sure the `adanet.ensemble.Strategy` is deterministic?\".format(\n ensemble_candidate_name))\n\n # TODO: Refactor architecture building logic to its own module.\n def _architecture_ensemble_spec(self, architecture, iteration_number,\n features, mode, labels,\n previous_ensemble_spec, config):\n \"\"\"Returns an `_EnsembleSpec` with the given architecture.\n\n Creates the ensemble architecture by calling `generate_subnetworks` on\n `self._subnetwork_generator` and only calling `build_subnetwork` on\n `Builders` included in the architecture. Once their ops are created, their\n variables are restored from the checkpoint.\n\n Args:\n architecture: An `_Architecture` instance.\n iteration_number: Integer current iteration number.\n features: Dictionary of `Tensor` objects keyed by feature name.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n labels: Labels `Tensor` or a dictionary of string label name to `Tensor`\n (for multi-head). Can be `None`.\n previous_ensemble_spec: The `_EnsembleSpec` for the previous iteration.\n Will be `None` for the first iteration.\n config: The current `tf.estimator.RunConfig`.\n\n Returns:\n An `EnsembleSpec` instance for the given architecture.\n\n Raises:\n ValueError: If a subnetwork from `architecture` is not found in the\n generated candidate `Builders` of the specified iteration.\n \"\"\"\n\n previous_ensemble = None\n if previous_ensemble_spec:\n previous_ensemble = previous_ensemble_spec.ensemble\n current_iteration = None\n for t, names in architecture.subnetworks_grouped_by_iteration:\n if t != iteration_number:\n continue\n previous_ensemble_reports, all_reports = [], []\n if self._report_materializer:\n previous_ensemble_reports, all_reports = (\n self._collate_subnetwork_reports(iteration_number))\n generated_subnetwork_builders = (\n self._call_generate_candidates(\n previous_ensemble=previous_ensemble,\n iteration_number=iteration_number,\n previous_ensemble_reports=previous_ensemble_reports,\n all_reports=all_reports,\n config=config))\n subnetwork_builder_names = {\n b.name: b for b in generated_subnetwork_builders\n }\n rebuild_subnetwork_builders = []\n for name in names:\n if name not in subnetwork_builder_names:\n raise ValueError(\n \"Required subnetwork builder is missing for iteration {}: {}\"\n .format(iteration_number, name))\n rebuild_subnetwork_builders.append(subnetwork_builder_names[name])\n previous_ensemble_summary = None\n previous_ensemble_subnetwork_builders = None\n if previous_ensemble_spec:\n # Always skip summaries when rebuilding previous architecture,\n # since they are not useful.\n previous_ensemble_summary = self._summary_maker(\n namespace=\"ensemble\",\n scope=previous_ensemble_spec.name,\n skip_summary=True)\n previous_ensemble_subnetwork_builders = (\n previous_ensemble_spec.subnetwork_builders)\n ensemble_candidates = []\n for ensemble_strategy in self._ensemble_strategies:\n ensemble_candidates += ensemble_strategy.generate_ensemble_candidates(\n rebuild_subnetwork_builders, previous_ensemble_subnetwork_builders)\n ensemble_candidate = self._find_ensemble_candidate(\n architecture.ensemble_candidate_name, ensemble_candidates)\n current_iteration = self._iteration_builder.build_iteration(\n base_global_step=architecture.global_step,\n iteration_number=iteration_number,\n ensemble_candidates=[ensemble_candidate],\n subnetwork_builders=rebuild_subnetwork_builders,\n features=features,\n labels=labels,\n mode=mode,\n config=config,\n 
previous_ensemble_summary=previous_ensemble_summary,\n previous_ensemble_spec=previous_ensemble_spec,\n rebuilding=True,\n rebuilding_ensembler_name=architecture.ensembler_name)\n max_candidates = 2 if previous_ensemble_spec else 1\n assert len(current_iteration.candidates) == max_candidates\n previous_ensemble_spec = current_iteration.candidates[-1].ensemble_spec\n previous_ensemble = previous_ensemble_spec.ensemble\n previous_ensemble_spec.architecture.set_replay_indices(\n architecture.replay_indices)\n return previous_ensemble_spec\n\n def _collate_subnetwork_reports(self, iteration_number):\n \"\"\"Prepares subnetwork.Reports to be passed to Generator.\n\n Reads subnetwork.MaterializedReports from past iterations,\n collates those that were included in previous_ensemble into\n previous_ensemble_reports as a List of subnetwork.MaterializedReports,\n and collates all reports from previous iterations into all_reports as\n another List of subnetwork.MaterializedReports.\n\n Args:\n iteration_number: Python integer AdaNet iteration number, starting from 0.\n\n Returns:\n (previous_ensemble_reports: List<subnetwork.MaterializedReport>,\n materialized_reports: List<MaterializedReport>)\n \"\"\"\n\n materialized_reports_all = (self._report_accessor.read_iteration_reports())\n previous_ensemble_reports = []\n all_reports = []\n\n # Since the number of iteration reports changes after the\n # MATERIALIZE_REPORT phase, we need to make sure that we always pass the\n # same reports to the Generator in the same iteration,\n # otherwise the graph that is built in the FREEZE_ENSEMBLE phase would be\n # different from the graph built in the training phase.\n\n # Iteration 0 should have 0 iteration reports passed to the\n # Generator, since there are no previous iterations.\n # Iteration 1 should have 1 list of reports for Builders\n # generated in iteration 0.\n # Iteration 2 should have 2 lists of reports -- one for iteration 0,\n # one for iteration 1. 
Note that the list of reports for iteration >= 1\n # should contain \"previous_ensemble\", in addition to the\n # Builders at the start of that iteration.\n # Iteration t should have t lists of reports.\n\n for i, iteration_reports in enumerate(materialized_reports_all):\n\n # This ensures that the FREEZE_ENSEMBLE phase does not pass the reports\n # generated in the previous phase of the same iteration to the\n # Generator when building the graph.\n if i >= iteration_number:\n break\n\n chosen_subnetworks_in_this_iteration = [\n subnetwork_report for subnetwork_report in iteration_reports\n if subnetwork_report.included_in_final_ensemble\n ]\n previous_ensemble_reports += chosen_subnetworks_in_this_iteration\n all_reports.extend(iteration_reports)\n\n return previous_ensemble_reports, all_reports\n\n def _train_op(self, iteration_estimator_spec, is_growing_phase):\n \"\"\"Returns the iteration train op or tf.no_op if growing the graph.\"\"\"\n\n train_op = iteration_estimator_spec.train_op\n if is_growing_phase:\n train_op = tf_compat.v1.train.get_global_step().assign_add(1)\n # NOTE: some version of TensorFlow check that train_op is an Op or Tensor\n # and crash if train_op is a Variable.\n train_op = tf.identity(train_op)\n return train_op\n\n def _create_estimator_spec(self, current_iteration, mode,\n iteration_number_tensor, previous_iteration_vars,\n is_growing_phase, evaluation_name):\n \"\"\"Creates the EstimatorSpec which will be returned by _adanet_model_fn.\"\"\"\n\n training = mode == tf.estimator.ModeKeys.TRAIN\n iteration_estimator_spec = current_iteration.estimator_spec\n training_chief_hooks = self._training_chief_hooks(current_iteration,\n training)\n training_hooks = self._training_hooks(current_iteration, training,\n iteration_number_tensor,\n previous_iteration_vars,\n is_growing_phase)\n if is_growing_phase:\n training_chief_hooks = self._process_hooks_for_growing_phase(\n training_chief_hooks)\n training_hooks = self._process_hooks_for_growing_phase(training_hooks)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=iteration_estimator_spec.predictions,\n loss=iteration_estimator_spec.loss,\n train_op=self._train_op(iteration_estimator_spec, is_growing_phase),\n eval_metric_ops=iteration_estimator_spec.eval_metric_ops,\n training_chief_hooks=training_chief_hooks,\n training_hooks=training_hooks,\n evaluation_hooks=self._evaluation_hooks(current_iteration, training,\n evaluation_name),\n scaffold=tf_compat.v1.train.Scaffold(summary_op=tf.constant(\"\")),\n export_outputs=iteration_estimator_spec.export_outputs)\n\n def _call_generate_candidates(self, previous_ensemble, iteration_number,\n previous_ensemble_reports, all_reports, config):\n defined_args = inspect.getargspec(\n self._subnetwork_generator.generate_candidates).args\n generate_args = dict(\n previous_ensemble=previous_ensemble,\n iteration_number=iteration_number,\n previous_ensemble_reports=previous_ensemble_reports,\n all_reports=all_reports)\n if \"config\" in defined_args:\n generate_args[\"config\"] = config\n return self._subnetwork_generator.generate_candidates(**generate_args)\n\n def _create_iteration(self,\n features,\n labels,\n mode,\n config,\n is_growing_phase,\n evaluation_checkpoint_path=None,\n best_ensemble_index_override=None):\n \"\"\"Constructs the TF ops and variables for the current iteration.\n\n Args:\n features: Dictionary of `Tensor` objects keyed by feature name.\n labels: Labels `Tensor` or a dictionary of string label name to `Tensor`\n (for multi-head). 
Can be `None`.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n config: The current `tf.estimator.RunConfig`.\n is_growing_phase: Whether we are in the AdaNet graph growing phase.\n evaluation_checkpoint_path: Path of the evaluation checkpoint to use. When\n `None`, this method uses the latest checkpoint instead.\n best_ensemble_index_override: Integer index to identify the latest\n iteration's best ensemble candidate instead of computing the best\n ensemble index dynamically conditional on the ensemble AdaNet losses.\n\n Returns:\n A two-tuple of the current `_Iteration`, and list of variables from\n the previous iteration for restoring during the graph growing phase.\n \"\"\"\n\n iteration_number = (\n self._checkpoint_path_iteration_number(evaluation_checkpoint_path)\n if evaluation_checkpoint_path else\n self._latest_checkpoint_iteration_number())\n\n # Use the evaluation checkpoint path to get both the iteration number and\n # variable values to avoid any race conditions between the first and second\n # checkpoint reads.\n if mode == tf.estimator.ModeKeys.EVAL and evaluation_checkpoint_path:\n iteration_number = tf.train.load_variable(evaluation_checkpoint_path,\n self._Keys.CURRENT_ITERATION)\n\n if is_growing_phase:\n assert mode == tf.estimator.ModeKeys.TRAIN\n assert config.is_chief\n iteration_number += 1\n\n # Only record summaries when training.\n skip_summaries = (mode != tf.estimator.ModeKeys.TRAIN or is_growing_phase)\n base_global_step = 0\n with tf_compat.v1.variable_scope(\"adanet\"):\n previous_ensemble_spec = None\n previous_ensemble = None\n previous_ensemble_summary = None\n previous_ensemble_subnetwork_builders = None\n architecture = None\n for i in range(iteration_number):\n architecture_filename = self._architecture_filename(i)\n if not tf.io.gfile.exists(architecture_filename):\n continue\n architecture = self._read_architecture(architecture_filename)\n logging.info(\n \"Importing architecture from %s: [%s].\", architecture_filename,\n \", \".join(\n sorted([\n \"'{}:{}'\".format(t, n)\n for t, n in architecture.subnetworks_grouped_by_iteration\n ])))\n base_global_step = architecture.global_step\n previous_ensemble_spec = self._architecture_ensemble_spec(\n architecture, i, features, mode, labels, previous_ensemble_spec,\n config)\n previous_ensemble = previous_ensemble_spec.ensemble\n previous_ensemble_summary = self._summary_maker(\n namespace=\"ensemble\",\n scope=previous_ensemble_spec.name,\n skip_summary=skip_summaries)\n previous_ensemble_subnetwork_builders = (\n previous_ensemble_spec.subnetwork_builders)\n previous_iteration_vars = None\n if is_growing_phase:\n # Keep track of the previous iteration variables so we can restore them\n # from the previous checkpoint after growing the graph. 
After this line,\n # any variables created will not have a matching one in the checkpoint\n # until it gets overwritten.\n # Note: It's not possible to just create a tf.train.Saver here since\n # this code is also run on TPU, which does not support creating Savers\n # inside model_fn.\n previous_iteration_vars = (\n tf_compat.v1.get_collection(tf_compat.v1.GraphKeys.GLOBAL_VARIABLES)\n + tf_compat.v1.get_collection(\n tf_compat.v1.GraphKeys.SAVEABLE_OBJECTS))\n previous_ensemble_reports, all_reports = [], []\n if self._report_materializer:\n previous_ensemble_reports, all_reports = (\n self._collate_subnetwork_reports(iteration_number))\n\n subnetwork_builders = self._call_generate_candidates(\n previous_ensemble=previous_ensemble,\n iteration_number=iteration_number,\n previous_ensemble_reports=previous_ensemble_reports,\n all_reports=all_reports,\n config=config)\n ensemble_candidates = []\n for ensemble_strategy in self._ensemble_strategies:\n ensemble_candidates += ensemble_strategy.generate_ensemble_candidates(\n subnetwork_builders, previous_ensemble_subnetwork_builders)\n current_iteration = self._iteration_builder.build_iteration(\n base_global_step=base_global_step,\n iteration_number=iteration_number,\n ensemble_candidates=ensemble_candidates,\n subnetwork_builders=subnetwork_builders,\n features=features,\n labels=labels,\n mode=mode,\n config=config,\n previous_ensemble_summary=previous_ensemble_summary,\n previous_ensemble_spec=previous_ensemble_spec,\n best_ensemble_index_override=best_ensemble_index_override)\n\n return current_iteration, previous_iteration_vars\n\n def _adanet_model_fn(self, features, labels, mode, params, config):\n \"\"\"AdaNet model_fn.\n\n Args:\n features: Dictionary of `Tensor` objects keyed by feature name.\n labels: Labels `Tensor` or a dictionary of string label name to `Tensor`\n (for multi-head). Can be `None`.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`.\n params: A dict of parameters.\n config: The current `tf.estimator.RunConfig`.\n\n Returns:\n A `EstimatorSpec` instance.\n\n Raises:\n UserWarning: When calling model_fn directly in TRAIN mode.\n \"\"\"\n\n # Unpack params.\n is_growing_phase = params.get(\"is_growing_phase\", False)\n is_inside_training_loop = params.get(\"is_inside_training_loop\", False)\n evaluation_checkpoint_path = params.get(\"evaluation_checkpoint_path\", None)\n evaluation_name = params.get(\"evaluation_name\", None)\n best_ensemble_index = params.get(\"best_ensemble_index\", None)\n\n training = mode == tf.estimator.ModeKeys.TRAIN\n if training and not is_inside_training_loop:\n raise UserWarning(\n \"The adanet.Estimator's model_fn should not be called directly in \"\n \"TRAIN mode, because its behavior is undefined outside the context \"\n \"of its `train` method. 
If you are trying to add custom metrics \"\n \"with `tf.contrib.estimator.add_metrics`, pass the `metric_fn` to \"\n \"this `Estimator's` constructor instead.\")\n\n current_iteration, previous_iteration_vars = self._create_iteration(\n features,\n labels,\n mode,\n config,\n is_growing_phase,\n evaluation_checkpoint_path=evaluation_checkpoint_path,\n best_ensemble_index_override=best_ensemble_index)\n\n # Variable which allows us to read the current iteration from a checkpoint.\n # This must be created here so it is available when calling\n # _execute_bookkeeping_phase after the first iteration.\n iteration_number_tensor = tf_compat.v1.get_variable(\n self._Keys.CURRENT_ITERATION,\n shape=[],\n dtype=tf.int64,\n initializer=tf_compat.v1.zeros_initializer(),\n trainable=False)\n\n return self._create_estimator_spec(\n current_iteration,\n mode,\n iteration_number_tensor,\n previous_iteration_vars,\n is_growing_phase,\n evaluation_name=evaluation_name)\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.train.latest_checkpoint", "tensorflow.io.gfile.rmtree", "tensorflow.train.Coordinator", "tensorflow.train.load_variable", "tensorflow.Graph", "tensorflow.train.get_checkpoint_state", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.exists", "tensorflow.estimator.RunConfig", "tensorflow.constant", "tensorflow.python.util.deprecation.deprecated", "tensorflow.device", "tensorflow.estimator.Estimator", "tensorflow.identity" ] ]
cxqj/6-ssn.pytorch
[ "85688f5eda96cdb545f48f03b7fd75a4122fface" ]
[ "ssn_dataset.py" ]
[ "import torch.utils.data as data\n\nimport os\nimport os.path\nfrom numpy.random import randint\nfrom ops.io import load_proposal_file\nfrom transforms import *\nfrom ops.utils import temporal_iou\n\n\nclass SSNInstance:\n\n def __init__(self, start_frame, end_frame, video_frame_count,\n fps=1, label=None,\n best_iou=None, overlap_self=None):\n self.start_frame = start_frame\n self.end_frame = min(end_frame, video_frame_count)\n self._label = label\n self.fps = fps\n\n self.coverage = (end_frame - start_frame) / video_frame_count\n\n self.best_iou = best_iou\n self.overlap_self = overlap_self\n\n self.loc_reg = None\n self.size_reg = None\n\n def compute_regression_targets(self, gt_list, fg_thresh):\n if self.best_iou < fg_thresh:\n # background proposals do not need this\n return\n\n # find the groundtruth instance with the highest IOU\n ious = [temporal_iou((self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)) for gt in gt_list]\n best_gt_id = np.argmax(ious)\n\n best_gt = gt_list[best_gt_id]\n\n prop_center = (self.start_frame + self.end_frame) / 2\n gt_center = (best_gt.start_frame + best_gt.end_frame) / 2\n\n prop_size = self.end_frame - self.start_frame + 1\n gt_size = best_gt.end_frame - best_gt.start_frame + 1\n\n # get regression target:\n # (1). center shift propotional to the proposal duration\n # (2). logarithm of the groundtruth duration over proposal duraiton\n\n self.loc_reg = (gt_center - prop_center) / prop_size\n try:\n self.size_reg = math.log(gt_size / prop_size)\n except:\n print(gt_size, prop_size, self.start_frame, self.end_frame)\n raise\n\n @property\n def start_time(self):\n return self.start_frame / self.fps\n\n @property\n def end_time(self):\n return self.end_frame / self.fps\n\n @property\n def label(self):\n return self._label if self._label is not None else -1\n\n @property\n def regression_targets(self):\n return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]\n\n\nclass SSNVideoRecord:\n def __init__(self, prop_record):\n self._data = prop_record\n\n frame_count = int(self._data[1])\n\n # build instance record\n self.gt = [\n SSNInstance(int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0) for x in self._data[2]\n if int(x[2]) > int(x[1])\n ]\n\n self.gt = list(filter(lambda x: x.start_frame < frame_count, self.gt))\n\n self.proposals = [\n SSNInstance(int(x[3]), int(x[4]), frame_count, label=int(x[0]),\n best_iou=float(x[1]), overlap_self=float(x[2])) for x in self._data[3] if int(x[4]) > int(x[3])\n ]\n\n self.proposals = list(filter(lambda x: x.start_frame < frame_count, self.proposals))\n\n @property\n def id(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n\n def get_fg(self, fg_thresh, with_gt=True):\n fg = [p for p in self.proposals if p.best_iou > fg_thresh]\n if with_gt:\n fg.extend(self.gt)\n\n for x in fg:\n x.compute_regression_targets(self.gt, fg_thresh)\n return fg\n\n def get_negatives(self, incomplete_iou_thresh, bg_iou_thresh,\n bg_coverage_thresh=0.01, incomplete_overlap_thresh=0.7):\n\n tag = [0] * len(self.proposals)\n\n incomplete_props = []\n background_props = []\n\n for i in range(len(tag)):\n if self.proposals[i].best_iou < incomplete_iou_thresh \\\n and self.proposals[i].overlap_self > incomplete_overlap_thresh:\n tag[i] = 1 # incomplete\n incomplete_props.append(self.proposals[i])\n\n for i in range(len(tag)):\n if tag[i] == 0 and \\\n self.proposals[i].best_iou < bg_iou_thresh and \\\n self.proposals[i].coverage > 
bg_coverage_thresh:\n background_props.append(self.proposals[i])\n return incomplete_props, background_props\n\n\nclass SSNDataSet(data.Dataset):\n\n def __init__(self, root_path,\n prop_file=None,\n body_seg=5, aug_seg=2, video_centric=True,\n new_length=1, modality='RGB',\n image_tmpl='img_{:05d}.jpg', transform=None,\n random_shift=True, test_mode=False,\n prop_per_video=8, fg_ratio=1, bg_ratio=1, incomplete_ratio=6,\n fg_iou_thresh=0.7,\n bg_iou_thresh=0.01, incomplete_iou_thresh=0.3,\n bg_coverage_thresh=0.02, incomplete_overlap_thresh=0.7,\n gt_as_fg=True, reg_stats=None, test_interval=6, verbose=True,\n exclude_empty=True, epoch_multiplier=1):\n\n self.root_path = root_path\n self.prop_file = prop_file\n self.verbose = verbose\n\n self.body_seg = body_seg\n self.aug_seg = aug_seg\n self.video_centric = video_centric\n self.exclude_empty = exclude_empty\n self.epoch_multiplier = epoch_multiplier\n\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.test_interval = test_interval\n\n self.fg_iou_thresh = fg_iou_thresh\n self.incomplete_iou_thresh = incomplete_iou_thresh\n self.bg_iou_thresh = bg_iou_thresh\n\n self.bg_coverage_thresh = bg_coverage_thresh\n self.incomplete_overlap_thresh = incomplete_overlap_thresh\n\n self.starting_ratio = 0.5\n self.ending_ratio = 0.5\n\n self.gt_as_fg = gt_as_fg\n\n denum = fg_ratio + bg_ratio + incomplete_ratio\n\n self.fg_per_video = int(prop_per_video * (fg_ratio / denum))\n self.bg_per_video = int(prop_per_video * (bg_ratio / denum))\n self.incomplete_per_video = prop_per_video - self.fg_per_video - self.bg_per_video\n\n self._parse_prop_file(stats=reg_stats)\n\n def _load_image(self, directory, idx):\n if self.modality == 'RGB' or self.modality == 'RGBDiff':\n return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]\n elif self.modality == 'Flow':\n x_img = Image.open(os.path.join(directory, self.image_tmpl.format('x', idx))).convert('L')\n y_img = Image.open(os.path.join(directory, self.image_tmpl.format('y', idx))).convert('L')\n\n return [x_img, y_img]\n\n def _parse_prop_file(self, stats=None):\n prop_info = load_proposal_file(self.prop_file)\n\n self.video_list = [SSNVideoRecord(p) for p in prop_info]\n\n if self.exclude_empty:\n self.video_list = list(filter(lambda x: len(x.gt) > 0, self.video_list))\n\n self.video_dict = {v.id: v for v in self.video_list}\n\n # construct three pools:\n # 1. Foreground\n # 2. Background\n # 3. 
Incomplete\n\n self.fg_pool = []\n self.bg_pool = []\n self.incomp_pool = []\n\n for v in self.video_list:\n self.fg_pool.extend([(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)])\n\n incomp, bg = v.get_negatives(self.incomplete_iou_thresh, self.bg_iou_thresh,\n self.bg_coverage_thresh, self.incomplete_overlap_thresh)\n\n self.incomp_pool.extend([(v.id, prop) for prop in incomp])\n self.bg_pool.extend([(v.id, prop) for prop in bg])\n\n if stats is None:\n self._compute_regresssion_stats()\n else:\n self.stats = stats\n\n if self.verbose:\n print(\"\"\"\n \n SSNDataset: Proposal file {prop_file} parsed.\n \n There are {pnum} usable proposals from {vnum} videos.\n {fnum} foreground proposals\n {inum} incomplete_proposals\n {bnum} background_proposals\n \n Sampling config:\n FG/BG/INC: {fr}/{br}/{ir}\n Video Centric: {vc}\n \n Epoch size multiplier: {em}\n \n Regression Stats:\n Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}\n Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}\n \"\"\".format(prop_file=self.prop_file, pnum=len(self.fg_pool) + len(self.bg_pool) + len(self.incomp_pool),\n fnum=len(self.fg_pool), inum=len(self.incomp_pool), bnum=len(self.bg_pool),\n fr=self.fg_per_video, br=self.bg_per_video, ir=self.incomplete_per_video, vnum=len(self.video_dict),\n vc=self.video_centric, stats=self.stats, em=self.epoch_multiplier))\n else:\n print(\"\"\"\n SSNDataset: Proposal file {prop_file} parsed. \n \"\"\".format(prop_file=self.prop_file))\n\n\n def _video_centric_sampling(self, video):\n\n fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)\n incomp, bg = video.get_negatives(self.incomplete_iou_thresh, self.bg_iou_thresh,\n self.bg_coverage_thresh, self.incomplete_overlap_thresh)\n\n def sample_video_proposals(proposal_type, video_id, video_pool, requested_num, dataset_pool):\n if len(video_pool) == 0:\n # if there is nothing in the video pool, go fetch from the dataset pool\n return [(dataset_pool[x], proposal_type) for x in np.random.choice(len(dataset_pool), requested_num, replace=False)]\n else:\n replicate = len(video_pool) < requested_num\n idx = np.random.choice(len(video_pool), requested_num, replace=replicate)\n return [((video_id, video_pool[x]), proposal_type) for x in idx]\n\n out_props = []\n out_props.extend(sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)) # sample foreground\n out_props.extend(sample_video_proposals(1, video.id, incomp, self.incomplete_per_video, self.incomp_pool)) # sample incomp.\n out_props.extend(sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)) # sample background\n\n return out_props\n\n def _random_sampling(self):\n out_props = []\n\n out_props.extend([(x, 0) for x in np.random.choice(self.fg_pool, self.fg_per_video, replace=False)])\n out_props.extend([(x, 1) for x in np.random.choice(self.incomp_pool, self.incomplete_per_video, replace=False)])\n out_props.extend([(x, 2) for x in np.random.choice(self.bg_pool, self.bg_per_video, replace=False)])\n\n return out_props\n\n def _sample_indices(self, valid_length, num_seg):\n \"\"\"\n\n :param record: VideoRecord\n :return: list\n \"\"\"\n\n average_duration = (valid_length + 1) // num_seg\n if average_duration > 0:\n # normal cases\n offsets = np.multiply(list(range(num_seg)), average_duration) \\\n + randint(average_duration, size=num_seg)\n elif valid_length > num_seg:\n offsets = np.sort(randint(valid_length, size=num_seg))\n else:\n offsets = np.zeros((num_seg, ))\n\n return offsets\n\n def 
_get_val_indices(self, valid_length, num_seg):\n\n if valid_length > num_seg:\n tick = valid_length / float(num_seg)\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])\n else:\n offsets = np.zeros((num_seg,))\n\n return offsets\n\n def _sample_ssn_indices(self, prop, frame_cnt):\n start_frame = prop.start_frame + 1\n end_frame = prop.end_frame\n\n duration = end_frame - start_frame + 1\n assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)\n valid_length = duration - self.new_length\n\n valid_starting = max(1, start_frame - int(duration * self.starting_ratio))\n valid_ending = min(frame_cnt - self.new_length + 1, end_frame + int(duration * self.ending_ratio))\n\n valid_starting_length = (start_frame - valid_starting - self.new_length + 1)\n valid_ending_length = (valid_ending - end_frame - self.new_length + 1)\n\n starting_scale = (valid_starting_length + self.new_length - 1) / (duration * self.starting_ratio)\n ending_scale = (valid_ending_length + self.new_length - 1) / (duration * self.ending_ratio)\n\n # get starting\n starting_offsets = (self._sample_indices(valid_starting_length, self.aug_seg) if self.random_shift\n else self._get_val_indices(valid_starting_length, self.aug_seg)) + valid_starting\n course_offsets = (self._sample_indices(valid_length, self.body_seg) if self.random_shift\n else self._get_val_indices(valid_length, self.body_seg)) + start_frame\n ending_offsets = (self._sample_indices(valid_ending_length, self.aug_seg) if self.random_shift\n else self._get_val_indices(valid_ending_length, self.aug_seg)) + end_frame\n\n offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))\n stage_split = [self.aug_seg, self.aug_seg + self.body_seg, self.aug_seg * 2 + self.body_seg]\n return offsets, starting_scale, ending_scale, stage_split\n\n def _load_prop_data(self, prop):\n\n # read frame count\n frame_cnt = self.video_dict[prop[0][0]].num_frames\n\n # sample segment indices\n prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(prop[0][1], frame_cnt)\n\n # turn prop into standard format\n\n # get label\n if prop[1] == 0:\n label = prop[0][1].label\n elif prop[1] == 1:\n label = prop[0][1].label # incomplete\n elif prop[1] == 2:\n label = 0 # background\n else:\n raise ValueError()\n frames = []\n for idx, seg_ind in enumerate(prop_indices):\n p = int(seg_ind)\n for x in range(self.new_length):\n frames.extend(self._load_image(prop[0][0], min(frame_cnt, p+x)))\n\n # get regression target\n if prop[1] == 0:\n reg_targets = prop[0][1].regression_targets\n reg_targets = (reg_targets[0] - self.stats[0][0]) / self.stats[1][0], \\\n (reg_targets[1] - self.stats[0][1]) / self.stats[1][1]\n else:\n reg_targets = (0.0, 0.0)\n\n return frames, label, reg_targets, starting_scale, ending_scale, stage_split, prop[1]\n\n def _compute_regresssion_stats(self):\n if self.verbose:\n print(\"computing regression target normalizing constants\")\n targets = []\n for video in self.video_list:\n fg = video.get_fg(self.fg_iou_thresh, False)\n for p in fg:\n targets.append(list(p.regression_targets))\n\n self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))\n\n def get_test_data(self, video, test_interval, gen_batchsize=4):\n props = video.proposals\n video_id = video.id\n frame_cnt = video.num_frames\n frame_ticks = np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1\n\n num_sampled_frames = len(frame_ticks)\n\n # avoid empty proposal list\n if len(props) == 0:\n 
props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))\n\n # process proposals to subsampled sequences\n rel_prop_list = []\n proposal_tick_list = []\n scaling_list = []\n for proposal in props:\n rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt\n rel_duration = rel_prop[1] - rel_prop[0]\n rel_starting_duration = rel_duration * self.starting_ratio\n rel_ending_duration = rel_duration * self.ending_ratio\n rel_starting = rel_prop[0] - rel_starting_duration\n rel_ending = rel_prop[1] + rel_ending_duration\n\n real_rel_starting = max(0.0, rel_starting)\n real_rel_ending = min(1.0, rel_ending)\n\n starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration\n ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration\n\n proposal_ticks = int(real_rel_starting * num_sampled_frames), int(rel_prop[0] * num_sampled_frames), \\\n int(rel_prop[1] * num_sampled_frames), int(real_rel_ending * num_sampled_frames)\n\n rel_prop_list.append(rel_prop)\n proposal_tick_list.append(proposal_ticks)\n scaling_list.append((starting_scaling, ending_scaling))\n\n # load frames\n # Since there are many frames for each video during testing, instead of returning the read frames,\n # we return a generator which gives the frames in small batches, this lower the memory burden\n # and runtime overhead. Usually setting batchsize=4 would fit most cases.\n def frame_gen(batchsize):\n frames = []\n cnt = 0\n for idx, seg_ind in enumerate(frame_ticks):\n p = int(seg_ind)\n for x in range(self.new_length):\n frames.extend(self._load_image(video_id, min(frame_cnt, p+x)))\n cnt += 1\n\n if cnt % batchsize == 0:\n frames = self.transform(frames)\n yield frames\n frames = []\n\n if len(frames):\n frames = self.transform(frames)\n yield frames\n\n return frame_gen(gen_batchsize), len(frame_ticks), torch.from_numpy(np.array(rel_prop_list)), \\\n torch.from_numpy(np.array(proposal_tick_list)), torch.from_numpy(np.array(scaling_list))\n\n def get_training_data(self, index):\n if self.video_centric:\n video = self.video_list[index]\n props = self._video_centric_sampling(video)\n else:\n props = self._random_sampling()\n\n out_frames = []\n out_prop_len = []\n out_prop_scaling = []\n out_prop_type = []\n out_prop_labels = []\n out_prop_reg_targets = []\n out_stage_split = []\n for idx, p in enumerate(props):\n prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(\n p)\n\n processed_frames = self.transform(prop_frames)\n out_frames.append(processed_frames)\n out_prop_len.append(self.body_seg + 2 * self.aug_seg)\n out_prop_scaling.append([starting_scale, ending_scale])\n out_prop_labels.append(prop_label)\n out_prop_reg_targets.append(reg_targets)\n out_prop_type.append(prop_type)\n out_stage_split.append(stage_split)\n\n out_prop_len = torch.from_numpy(np.array(out_prop_len))\n out_prop_scaling = torch.from_numpy(np.array(out_prop_scaling, dtype=np.float32))\n out_prop_labels = torch.from_numpy(np.array(out_prop_labels))\n out_prop_reg_targets = torch.from_numpy(np.array(out_prop_reg_targets, dtype=np.float32))\n out_prop_type = torch.from_numpy(np.array(out_prop_type))\n out_stage_split = torch.from_numpy(np.array(out_stage_split))\n out_frames = torch.cat(out_frames)\n return out_frames, out_prop_len, out_prop_scaling, out_prop_type, out_prop_labels, \\\n out_prop_reg_targets, out_stage_split\n\n def get_all_gt(self):\n gt_list = []\n for video in self.video_list:\n vid = video.id\n gt_list.extend([[vid, x.label - 1, 
x.start_frame / video.num_frames,\n x.end_frame / video.num_frames] for x in video.gt])\n return gt_list\n\n def __getitem__(self, index):\n real_index = index % len(self.video_list)\n if self.test_mode:\n return self.get_test_data(self.video_list[real_index], self.test_interval)\n else:\n return self.get_training_data(real_index)\n\n def __len__(self):\n return len(self.video_list) * self.epoch_multiplier" ]
[ [ "numpy.random.randint" ] ]
millerjoey975/usaspending-api
[ "66dd6b231087e92696d0ac09ef7700b6069829ad" ]
[ "usaspending_api/etl/elasticsearch_loader_helpers/delete_data.py" ]
[ "import logging\n\nimport pandas as pd\n\nfrom django.conf import settings\nfrom time import perf_counter\nfrom typing import Optional, Dict, Union, Any\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\nfrom elasticsearch_dsl.mapping import Mapping\n\nfrom usaspending_api.common.helpers.s3_helpers import retrieve_s3_bucket_object_list, access_s3_object\nfrom usaspending_api.etl.elasticsearch_loader_helpers.index_config import (\n ES_AWARDS_UNIQUE_KEY_FIELD,\n ES_TRANSACTIONS_UNIQUE_KEY_FIELD,\n)\nfrom usaspending_api.etl.elasticsearch_loader_helpers.utilities import (\n execute_sql_statement,\n format_log,\n chunks,\n)\n\nlogger = logging.getLogger(\"script\")\n\n\ndef delete_docs_by_unique_key(\n client: Elasticsearch,\n key: str,\n value_list: list,\n task_id: str,\n index,\n refresh_after: bool = True,\n delete_chunk_size: int = 1000,\n) -> int:\n \"\"\"\n Bulk delete a batch of documents whose field identified by ``key`` matches any value provided in the\n ``values_list``.\n\n NOTE: This delete routine looks at just the index name given. If there are duplicate records across\n multiple indexes, an alias or wildcard should be provided for ``index`` param that covers multiple\n indices, or this will need to be run once per index.\n\n Args:\n client (Elasticsearch): elasticsearch-dsl client for making calls to an ES cluster\n key (str): name of field in targeted elasticsearch index that should have a unique value for\n every doc in the index. The field or sub-field provided MUST be of ``keyword`` type (or ``_id`` meta field)\n value_list (list): if key field has these values, the document will be deleted\n task_id (str): name of ES ETL job being run, used in logging\n index (str): name of index (or alias) to target for the ``_delete_by_query`` ES operation.\n refresh_after (bool): Whether to call ``_refresh`` on the index when all of the provided values in\n ``value_list`` have been processed for delete; defaults to ``True``. If many small deletes happen at a\n rapid rate, it may be best to set this ``False`` and await a deferred refresh afterward in the calling\n code. NOTE: This param will be ignored and a refresh will be attempted if this function\n errors-out during execution, in order to not leave un-refreshed deletes in the index.\n delete_chunk_size (int): the batch-size of terms value-array given to each _delete_by_query call. Needs to be\n less than 65536 (max values for any terms query), and less than index.max_results_window setting. Ideally\n use ``config[\"partition_size\"]`` (derived from --partition-size) to set this to a calibrated value. 
If not\n provided, uses 1000 as a safe default (10,000 resulted in some timeouts on a busy cluster).\n\n Returns: Number of ES documents deleted\n \"\"\"\n start = perf_counter()\n\n if len(value_list) == 0:\n logger.info(format_log(\"Nothing to delete\", action=\"Delete\", name=task_id))\n return 0\n\n logger.info(format_log(f\"Deleting up to {len(value_list):,} document(s)\", action=\"Delete\", name=task_id))\n if not index:\n raise RuntimeError(\"index name must be provided\")\n\n if not _is_allowed_key_field_type(client, key, index):\n msg = (\n f'Cannot perform deletes in index \"{index}\" by key field \"{key}\" because its type is not one of '\n f\"the allowed field types, or the field was not found in that index.\"\n )\n logger.error(format_log(msg=msg, action=\"Delete\", name=task_id))\n raise RuntimeError(msg)\n\n if delete_chunk_size > 65536:\n # 65,536 is max number of terms that can be added to an ES terms filter query\n msg = (\n f\"{delete_chunk_size} is greater than 65,536, which is the max number of terms that can be added to an ES \"\n f\"terms filter query\"\n )\n logger.error(format_log(msg=msg, action=\"Delete\"))\n raise RuntimeError(msg)\n\n chunks_processed = 0\n deleted = 0\n is_error = False\n try:\n values_generator = chunks(value_list, delete_chunk_size)\n for chunk_of_values in values_generator:\n # Invoking _delete_by_query as per the elasticsearch-dsl docs:\n # https://elasticsearch-dsl.readthedocs.io/en/latest/search_dsl.html#delete-by-query\n # _refresh is deferred until the end of chunk processing\n q = Search(using=client, index=index).filter(\"terms\", **{key: chunk_of_values}) # type: Search\n # params:\n # conflicts=\"proceed\": Ignores version conflict errors if a doc delete is attempted more than once\n # slices=\"auto\": Will create parallel delete batches per shard\n q = q.params(conflicts=\"proceed\", slices=\"auto\")\n response = q.delete()\n # Some subtle errors come back on the response\n if response[\"timed_out\"]:\n msg = f\"Delete request timed out on cluster after {int(response['took'])/1000:.2f}s\"\n logger.error(format_log(msg=msg, action=\"Delete\", name=task_id))\n raise RuntimeError(msg)\n if response[\"failures\"]:\n fail_snippet = \"\\n\\t\\t\" + \"\\n\\t\\t\".join(map(str, response[\"failures\"][0:4])) + \"\\n\\t\\t\" + \"...\"\n msg = f\"Some docs failed to delete on cluster:{fail_snippet}\"\n logger.error(format_log(msg=msg, action=\"Delete\", name=task_id))\n raise RuntimeError(msg)\n logger.info(\n format_log(\n f\"Deleted {response['deleted']:,} docs in ES from chunk of size {len(chunk_of_values):,} \"\n f\"in {int(response['took'])/1000:.2f}s, \"\n f\"and ignored {response['version_conflicts']:,} version conflicts\",\n action=\"Delete\",\n name=task_id,\n )\n )\n deleted += response[\"deleted\"]\n chunks_processed += 1\n except Exception:\n is_error = True\n logger.exception(format_log(\"\", name=task_id, action=\"Delete\"))\n raise\n finally:\n if deleted > 0 and (refresh_after or is_error):\n if not is_error:\n refresh_msg = \"Refreshing index so deletes take effect\"\n else:\n refresh_msg = \"Attempting index refresh while handling error so deletes take effect\"\n logger.info(format_log(refresh_msg, action=\"Delete\", name=task_id))\n client.indices.refresh(index=index)\n if chunks_processed > 1 or is_error:\n # This log becomes redundant unless to log the sum of multiple chunks' deletes (or error)\n error_text = \" before encountering an error\" if is_error else \"\"\n duration = perf_counter() - start\n docs = 
f\"document{'s' if deleted != 1 else ''}\"\n msg = f\"Delete operation took {duration:.2f}s. Removed {deleted:,} total {docs}{error_text}\"\n logger.info(format_log(msg, action=\"Delete\", name=task_id))\n\n return deleted\n\n\ndef _is_allowed_key_field_type(client: Elasticsearch, key_field: str, index: str) -> bool:\n \"\"\"Return ``True`` if the given field's mapping in the given index is in our allowed list of ES types\n compatible with term(s) queries\n\n This is mainly to prevent use of ``text`` fields in terms queries, which give bad results because Elasticsearch\n changes the values of text fields during analysis.\n \"\"\"\n if key_field == \"_id\":\n # Special case. It is a reserved field, without a type, but can effectively be treated as a keyword field\n return True\n\n # Get true index name from alias, if provided an alias\n response = client.indices.get(index)\n aliased_index_name = list(response.keys())[0]\n es_field_type = Mapping().from_es(using=client, index=aliased_index_name).resolve_field(key_field)\n # This is the allowed types whitelist. More can be added as-needed if compatible with terms(s) queries.\n if es_field_type and es_field_type.name in [\"keyword\", \"integer\"]:\n return True\n return False\n\n\ndef _lookup_deleted_award_keys(\n client: Elasticsearch,\n lookup_key: str,\n value_list: list,\n config: dict,\n index: Optional[str] = None,\n lookup_chunk_size: int = 50000,\n) -> list:\n \"\"\"Derive a list of award keys given a target index, Lookup field, and lookup values\n\n This returns a list of all unique award keys, which are compiled from the ``ES_AWARDS_UNIQUE_KEY_FIELD`` field of\n any document in the given ``index`` that matches the query. The matching query is a terms query that will return\n the doc if its ``lookup_key`` field has any value provided in ``value_list``.\n\n Args:\n client (Elasticsearch): elasticsearch-dsl client for making calls to an ES cluster\n lookup_key (str): name of field in targeted elasticsearch index by which we are looking up docs. The field or\n sub-field provided MUST be of ``keyword`` type (or ``_id`` meta field)\n value_list (list): if lookup_key field has any of these values, the document will be returned from the lookup\n config (dict): collection of key-value pairs that encapsulates runtime arguments for this ES management task\n index (str): Optional name, alias, or pattern of index this query will target. Looks up via config if not\n provided\n lookup_chunk_size (int): the batch-size of terms value-array to be looked-up. 
Needs to be less\n than 65536 (max values for any terms query), and less than config[\"max_query_size\"]\n\n Returns: list of values for the ES_AWARDS_UNIQUE_KEY_FIELD fields in the looked-up documents.\n \"\"\"\n if index is None:\n index = f\"{config['query_alias_prefix']}-*\"\n\n if not _is_allowed_key_field_type(client, lookup_key, index):\n msg = (\n f'Cannot perform lookups in index \"{index}\" with key field \"{lookup_key}\" because its type is not one of '\n f\"the allowed field types, or the field was not found in that index.\"\n )\n logger.error(format_log(msg=msg, action=\"Delete\"))\n raise RuntimeError(msg)\n\n if lookup_chunk_size > 65536:\n # 65,536 is max number of terms that can be added to an ES terms filter query\n msg = (\n f\"{lookup_chunk_size} is greater than 65,536, which is the max number of terms that can be added to an ES \"\n f\"terms filter query\"\n )\n logger.error(format_log(msg=msg, action=\"Delete\"))\n raise RuntimeError(msg)\n\n if lookup_chunk_size > config[\"max_query_size\"]:\n # Some keys would be left undiscovered if our chunk was cut short by the query only returning a lesser subset\n msg = (\n f\"{lookup_chunk_size} is greater {config['max_query_size']}, which is the max number of query \"\n f\"results returnable from this index. Use a smaller chunk or increase max_result_window for this index.\"\n )\n logger.error(format_log(msg=msg, action=\"Delete\"))\n raise RuntimeError(msg)\n\n award_key_list = []\n values_generator = chunks(value_list, lookup_chunk_size)\n for chunk_of_values in values_generator:\n q = Search(using=client, index=index).filter(\"terms\", **{lookup_key: chunk_of_values}) # type: Search\n q.update_from_dict({\"size\": config[\"max_query_size\"]})\n response = q.execute()\n if response[\"hits\"][\"total\"][\"value\"] != 0:\n award_key_list += [x[\"_source\"][ES_AWARDS_UNIQUE_KEY_FIELD] for x in response[\"hits\"][\"hits\"]]\n return award_key_list\n\n\ndef delete_awards(client: Elasticsearch, config: dict, task_id: str = \"Sync DB Deletes\") -> int:\n \"\"\"Delete all awards in the Elasticsearch awards index that were deleted in the source database.\n\n This performs the deletes of award documents in ES in a series of batches, as there could be many. Millions of\n awards deleted may take a prohibitively long time, and it could be better to just re-index all documents from\n the DB instead.\n\n This requires looking-up the awards-to-delete by finding the unique-key of each parent award to any deleted\n transaction, and then getting the distinct list of unique-award-keys that are NOT present in the database; then\n deleting those in the ES awards index.\n - The deleted transactions are recorded in a CSV delete log file in S3.\n - NOTE!! 
This order of operations therefore requires that ES award deletes be processed BEFORE transaction\n ES deletes are (both deletes cannot run in parallel).\n\n Args:\n client (Elasticsearch): elasticsearch-dsl client for making calls to an ES cluster\n config (dict): collection of key-value pairs that encapsulates runtime arguments for this ES management task\n task_id (str): label for this sub-step of the ETL\n\n Returns: Number of ES docs deleted in the index\n \"\"\"\n deleted_tx_keys = _gather_deleted_transaction_keys(config)\n # While extracting unique award keys, the lookup is on transactions and must match against the unique transaction id\n award_keys = _lookup_deleted_award_keys(\n client,\n ES_TRANSACTIONS_UNIQUE_KEY_FIELD,\n [*deleted_tx_keys],\n config,\n settings.ES_TRANSACTIONS_QUERY_ALIAS_PREFIX + \"-*\",\n )\n award_keys = list(set(award_keys)) # get unique list of keys\n award_keys_len = len(award_keys)\n if award_keys_len == 0:\n logger.info(\n format_log(\n f\"No related awards found for deletion. Zero transaction docs found from which to derive awards.\",\n action=\"Delete\",\n name=task_id,\n )\n )\n return 0\n logger.info(\n format_log(f\"Derived {award_keys_len} award keys from transactions in ES\", action=\"Delete\", name=task_id)\n )\n\n deleted_award_kvs = _check_awards_for_deletes(award_keys)\n deleted_award_kvs_len = len(deleted_award_kvs)\n if deleted_award_kvs_len == 0:\n # In this case it could be an award's transaction was deleted, but not THE LAST transaction of that award.\n # i.e. the deleted transaction's \"siblings\" are still in the DB and therefore the parent award should remain\n logger.info(\n format_log(\n f\"No related awards found will be deleted. All derived awards are still in the DB.\",\n action=\"Delete\",\n name=task_id,\n )\n )\n return 0\n logger.info(\n format_log(\n f\"{deleted_award_kvs_len} awards no longer in the DB will be removed from ES\", action=\"Delete\", name=task_id\n )\n )\n\n values_list = [v for d in deleted_award_kvs for v in d.values()]\n return delete_docs_by_unique_key(\n client,\n key=config[\"unique_key_field\"],\n value_list=values_list,\n task_id=task_id,\n index=config[\"index_name\"],\n delete_chunk_size=config[\"partition_size\"],\n )\n\n\ndef delete_transactions(client: Elasticsearch, config: dict, task_id: str = \"Sync DB Deletes\") -> int:\n \"\"\"Delete all transactions in the Elasticsearch transactions index that were deleted in the source database.\n\n This performs the deletes of transaction documents in ES in a series of batches, as there could be many. 
Millions of\n transactions deleted may take a prohibitively long time, and it could be better to just re-index all documents from\n the DB instead.\n\n Side Effects:\n The index from which docs were deleted will be refreshed if the delete was successful\n and removed more than 0 docs.\n\n Args:\n client (Elasticsearch): elasticsearch-dsl client for making calls to an ES cluster\n config (dict): collection of key-value pairs that encapsulates runtime arguments for this ES management task\n task_id (str): label for this sub-step of the ETL\n\n\n Returns: Number of ES docs deleted in the index\n \"\"\"\n deleted_tx_keys = _gather_deleted_transaction_keys(config)\n return delete_docs_by_unique_key(\n client,\n key=config[\"unique_key_field\"],\n value_list=[*deleted_tx_keys],\n task_id=\"Sync DB Deletes\",\n index=config[\"index_name\"],\n delete_chunk_size=config[\"partition_size\"],\n )\n\n\ndef _gather_deleted_transaction_keys(config: dict) -> Optional[Dict[Union[str, Any], Dict[str, Any]]]:\n \"\"\"\n Connect to S3 and gather all of the transaction ids stored in CSV files\n generated by the broker when transactions are removed from the DB.\n \"\"\"\n\n if not config[\"process_deletes\"]:\n logger.info(format_log(f\"Skipping the S3 CSV fetch for deleted transactions\", action=\"Delete\"))\n return None\n\n logger.info(format_log(f\"Gathering all deleted transactions from S3\", action=\"Delete\"))\n start = perf_counter()\n\n bucket_objects = retrieve_s3_bucket_object_list(bucket_name=config[\"s3_bucket\"])\n logger.info(format_log(f\"{len(bucket_objects):,} files found in bucket '{config['s3_bucket']}'\", action=\"Delete\"))\n\n if config[\"verbose\"]:\n logger.info(format_log(f\"CSV data from {config['starting_date']} to now\", action=\"Delete\"))\n\n filtered_csv_list = [\n x\n for x in bucket_objects\n if (x.key.endswith(\".csv\") and not x.key.startswith(\"staging\") and x.last_modified >= config[\"starting_date\"])\n ]\n\n if config[\"verbose\"]:\n logger.info(format_log(f\"Found {len(filtered_csv_list)} csv files\", action=\"Delete\"))\n\n deleted_keys = {}\n\n for obj in filtered_csv_list:\n object_data = access_s3_object(bucket_name=config[\"s3_bucket\"], obj=obj)\n\n # Ingests the CSV into a dataframe. 
pandas thinks some ids are dates, so disable parsing\n data = pd.read_csv(object_data, dtype=str)\n\n if \"detached_award_proc_unique\" in data:\n new_ids = [\"CONT_TX_\" + x.upper() for x in data[\"detached_award_proc_unique\"].values]\n elif \"afa_generated_unique\" in data:\n new_ids = [\"ASST_TX_\" + x.upper() for x in data[\"afa_generated_unique\"].values]\n else:\n msg = f\"[Missing valid CSV col] in {obj.key}\"\n logger.error(format_log(msg, action=\"Delete\"))\n raise RuntimeError(msg)\n\n for uid in new_ids:\n if uid in deleted_keys:\n if deleted_keys[uid][\"timestamp\"] < obj.last_modified:\n deleted_keys[uid][\"timestamp\"] = obj.last_modified\n else:\n deleted_keys[uid] = {\"timestamp\": obj.last_modified}\n\n if config[\"verbose\"]:\n for uid, deleted_dict in deleted_keys.items():\n logger.info(format_log(f\"id: {uid} last modified: {deleted_dict['timestamp']}\", action=\"Delete\"))\n\n logger.info(\n format_log(\n f\"Gathered {len(deleted_keys):,} deleted transactions from {len(filtered_csv_list)} files in \"\n f\"increment in {perf_counter() - start:.2f}s\",\n action=\"Delete\",\n )\n )\n return deleted_keys\n\n\ndef _check_awards_for_deletes(id_list: list) -> list:\n \"\"\"Takes a list of award key values and returns them if they are NOT found in the awards DB table\"\"\"\n formatted_value_ids = \"\"\n for x in id_list:\n formatted_value_ids += \"('\" + x + \"'),\"\n\n sql = \"\"\"\n SELECT x.generated_unique_award_id\n FROM (values {ids}) AS x(generated_unique_award_id)\n LEFT JOIN awards a ON a.generated_unique_award_id = x.generated_unique_award_id\n WHERE a.generated_unique_award_id IS NULL\"\"\"\n\n return execute_sql_statement(sql.format(ids=formatted_value_ids[:-1]), results=True)\n" ]
[ [ "pandas.read_csv" ] ]
NNHieu/cleanrl
[ "6869080f6ec53612734a9f592fc9872bb485f6d3" ]
[ "cleanrl_utils/paper_plot.py" ]
[ "import argparse\nimport os\nimport pickle\nfrom os import path\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport wandb\n\nsns.set_style(\"whitegrid\")\nmpl.rcParams[\"text.usetex\"] = True\nmpl.rcParams[\"text.latex.preamble\"] = r\"\\usepackage{amsmath}\" # for \\text command\n\nparser = argparse.ArgumentParser(description=\"CleanRL Plots\")\n# Common arguments\nparser.add_argument(\n \"--wandb-project\", type=str, default=\"cleanrl/cleanrl.benchmark\", help=\"the name of wandb project (e.g. cleanrl/cleanrl)\"\n)\nparser.add_argument(\n \"--feature-of-interest\", type=str, default=\"charts/episodic_return\", help=\"which feature to be plotted on the y-axis\"\n)\nparser.add_argument(\"--hyper-params-tuned\", nargs=\"+\", default=[], help=\"the hyper parameters tuned\")\n# parser.add_argument('--scan-history', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n# help='if toggled, cuda will not be enabled by default')\nparser.add_argument(\"--interested-exp-names\", nargs=\"+\", default=[], help=\"the hyper parameters tuned\")\nparser.add_argument(\"--samples\", type=int, default=500, help=\"the sampled point of the run\")\nparser.add_argument(\"--smooth-weight\", type=float, default=0.95, help=\"the weight parameter of the exponential moving average\")\nparser.add_argument(\n \"--last-n-episodes\",\n type=int,\n default=10,\n help=\"for analysis only; the last n episodes from which the mean of the feature of interest is calculated\",\n)\nparser.add_argument(\"--num-points-x-axis\", type=int, default=500, help=\"the number of points in the x-axis\")\nparser.add_argument(\"--font-size\", type=int, default=18, help=\"the font size of the plots\")\nparser.add_argument(\"--x-label\", type=str, default=\"Time Steps\", help=\"the label of x-axis\")\nparser.add_argument(\"--y-label\", type=str, default=\"Episodic Return\", help=\"the label of y-axis\")\nparser.add_argument(\"--y-lim-bottom\", type=float, default=0.0, help=\"the bottom limit for the y-axis\")\nparser.add_argument(\"--output-format\", type=str, default=\"pdf\", help=\"either `pdf`, `png`, or `svg`\")\nargs = parser.parse_args()\napi = wandb.Api()\n\n# hacks\nenv_dict = {\n # 'MicrortsAttackShapedReward-v1': 'MicrortsAttackHRL-v1',\n # 'MicrortsProduceCombatUnitsShapedReward-v1': 'MicrortsProduceCombatUnitHRL-v1',\n # 'MicrortsRandomEnemyShapedReward3-v1': 'MicrortsRandomEnemyHRL3-v1',\n}\nexp_convert_dict = {\n \"ppo_atari_visual\": \"PPO\",\n \"dqn_atari_visual\": \"DQN\",\n \"apex_dqn_atari_visual\": \"Ape-X DQN\",\n \"c51_atari_visual\": \"C51\",\n # 'rnd_ppo_gamma_0.999_nocliploss_lr_1e-4_128envs_entcoef_0.001_stickyaction': \"PPO RND\",\n \"ddpg_continuous_action\": \"DDPG\",\n # 'dqn': 'DQN',\n # 'ppg_procgen_fast': 'PPG',\n # 'ppg_procgen_impala_cnn': 'PPG-IMPALA-CNN',\n # 'ppo': \"PPO\",\n # 'ppo_car_racing': \"PPO\",\n \"ppo_continuous_action\": \"PPO\",\n # 'ppo_procgen_fast': \"PPO\",\n # \"ppo_procgen_impala_cnn\": \"PPO-IMPALA-CNN\",\n \"td3_continuous_action\": \"TD3\",\n}\n\n# args.feature_of_interest = 'charts/episodic_return'\nfeature_name = args.feature_of_interest.replace(\"/\", \"_\")\nif not os.path.exists(feature_name):\n os.makedirs(feature_name)\n with open(f\"{feature_name}/cache.pkl\", \"wb\") as handle:\n pickle.dump([[], [], [], {}, [], set([])], handle, protocol=pickle.HIGHEST_PROTOCOL)\nwith open(f\"{feature_name}/cache.pkl\", \"rb\") as handle:\n summary_list, config_list, name_list, envs, 
exp_names, ids = pickle.load(handle)\n\n# Change oreilly-class/cifar to <entity/project-name>\nruns = api.runs(args.wandb_project)\ndata = []\nfor idx, run in enumerate(runs):\n if run.id not in ids:\n ids.add(run.id)\n if args.feature_of_interest in run.summary:\n metrics_dataframe = run.history(keys=[args.feature_of_interest, \"global_step\"], samples=args.samples)\n exp_name = run.config[\"exp_name\"]\n for param in args.hyper_params_tuned:\n if param in run.config:\n exp_name += \"-\" + param + \"-\" + str(run.config[param]) + \"-\"\n\n metrics_dataframe.insert(len(metrics_dataframe.columns), \"algo\", exp_name)\n exp_names += [exp_name]\n metrics_dataframe.insert(len(metrics_dataframe.columns), \"seed\", run.config[\"seed\"])\n\n data += [metrics_dataframe]\n if run.config[\"env_id\"] not in envs:\n envs[run.config[\"env_id\"]] = [metrics_dataframe]\n envs[run.config[\"env_id\"] + \"total_timesteps\"] = run.config[\"total_timesteps\"]\n else:\n envs[run.config[\"env_id\"]] += [metrics_dataframe]\n\n # run.summary are the output key/values like accuracy. We call ._json_dict to omit large files\n summary_list.append(run.summary._json_dict)\n\n # run.config is the input metrics. We remove special values that start with _.\n config_list.append({k: v for k, v in run.config.items() if not k.startswith(\"_\")})\n\n # run.name is the name of the run.\n name_list.append(run.name)\n\n\nsummary_df = pd.DataFrame.from_records(summary_list)\nconfig_df = pd.DataFrame.from_records(config_list)\nname_df = pd.DataFrame({\"name\": name_list})\nall_df = pd.concat([name_df, config_df, summary_df], axis=1)\n# data = pd.concat(data, ignore_index=True)\nwith open(f\"{feature_name}/cache.pkl\", \"wb\") as handle:\n pickle.dump([summary_list, config_list, name_list, envs, exp_names, ids], handle, protocol=pickle.HIGHEST_PROTOCOL)\nprint(\"data loaded\")\n\n# https://stackoverflow.com/questions/42281844/what-is-the-mathematics-behind-the-smoothing-parameter-in-tensorboards-scalar#_=_\ndef smooth(scalars, weight): # Weight between 0 and 1\n last = scalars[0] # First value in the plot (first timestep)\n smoothed = list()\n for point in scalars:\n smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value\n smoothed.append(smoothed_val) # Save it\n last = smoothed_val # Anchor the last smoothed value\n\n return smoothed\n\n\n# smoothing\nfor env in envs:\n if not env.endswith(\"total_timesteps\"):\n for idx, metrics_dataframe in enumerate(envs[env]):\n envs[env][idx] = metrics_dataframe.dropna(subset=[args.feature_of_interest])\n# envs[env][idx][args.feature_of_interest] = smooth(metrics_dataframe[args.feature_of_interest], 0.85)\n\nsns.set(style=\"darkgrid\")\n\n\ndef get_df_for_env(env_id):\n env_total_timesteps = envs[env_id + \"total_timesteps\"]\n env_increment = env_total_timesteps / 500\n envs_same_x_axis = []\n for sampled_run in envs[env_id]:\n df = pd.DataFrame(columns=sampled_run.columns)\n x_axis = [i * env_increment for i in range(500 - 2)]\n current_row = 0\n for timestep in x_axis:\n while sampled_run.iloc[current_row][\"global_step\"] < timestep:\n current_row += 1\n if current_row > len(sampled_run) - 2:\n break\n if current_row > len(sampled_run) - 2:\n break\n temp_row = sampled_run.iloc[current_row].copy()\n temp_row[\"global_step\"] = timestep\n df = df.append(temp_row)\n\n envs_same_x_axis += [df]\n return pd.concat(envs_same_x_axis, ignore_index=True)\n\n\ndef export_legend(ax, filename=\"legend.pdf\"):\n try:\n # import matplotlib as mpl\n # mpl.rcParams['text.usetex'] 
= True\n # mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}'] #for \\text command\n fig2 = plt.figure()\n ax2 = fig2.add_subplot()\n ax2.axis(\"off\")\n handles, labels = ax.get_legend_handles_labels()\n\n legend = ax2.legend(\n handles=handles, labels=labels, frameon=False, loc=\"lower center\", ncol=6, fontsize=20, handlelength=1\n )\n for text in legend.get_texts():\n if text.get_text() in exp_convert_dict:\n text.set_text(exp_convert_dict[text.get_text()])\n text.set_text(text.get_text().replace(\"_\", \"-\"))\n for line in legend.get_lines():\n line.set_linewidth(4.0)\n fig = legend.figure\n fig.canvas.draw()\n\n bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(filename, dpi=\"figure\", bbox_inches=bbox)\n fig.clf()\n except:\n print(f\"export legend failed: {filename}\")\n\n\nif not os.path.exists(f\"{feature_name}/data\"):\n os.makedirs(f\"{feature_name}/data\")\nif not os.path.exists(f\"{feature_name}/plots\"):\n os.makedirs(f\"{feature_name}/plots\")\nif not os.path.exists(f\"{feature_name}/legends\"):\n os.makedirs(f\"{feature_name}/legends\")\n\n\ninterested_exp_names = sorted(list(exp_convert_dict.keys())) # ['ppo_continuous_action', 'ppo_atari_visual']\npalette = sns.color_palette(n_colors=len(set(exp_convert_dict.values())))\npalette_dict = dict(zip(set(exp_convert_dict.values()), palette))\ncurrent_palette_dict = dict(zip(interested_exp_names, [palette_dict[exp_convert_dict[k]] for k in interested_exp_names]))\nif args.interested_exp_names:\n interested_exp_names = args.interested_exp_names\nprint(interested_exp_names)\n# raise\n# print(current_palette_dict)\n\nlegend_df = pd.DataFrame()\n\n# hack\nalgos_in_legend = []\n\nif args.font_size:\n plt.rc(\"axes\", titlesize=args.font_size) # fontsize of the axes title\n plt.rc(\"axes\", labelsize=args.font_size) # fontsize of the x and y labels\n plt.rc(\"xtick\", labelsize=args.font_size) # fontsize of the tick labels\n plt.rc(\"ytick\", labelsize=args.font_size) # fontsize of the tick labels\n plt.rc(\"legend\", fontsize=args.font_size) # legend fontsize\n\nstats = {item: [] for item in [\"env_id\", \"exp_name\", args.feature_of_interest]}\n# uncommenet the following to generate all figures\nfor env in set(all_df[\"env_id\"]):\n if not path.exists(f\"{feature_name}/data/{env}.pkl\"):\n with open(f\"{feature_name}/data/{env}.pkl\", \"wb\") as handle:\n data = get_df_for_env(env)\n data[\"seed\"] = data[\"seed\"].astype(float)\n data[args.feature_of_interest] = data[args.feature_of_interest].astype(float)\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n with open(f\"{feature_name}/data/{env}.pkl\", \"rb\") as handle:\n data = pickle.load(handle)\n print(f\"{env}'s data loaded\")\n\n def _smooth(df):\n df[args.feature_of_interest] = smooth(list(df[args.feature_of_interest]), args.smooth_weight)\n return df\n\n plot_data = data.groupby([\"seed\", \"algo\"]).apply(_smooth).loc[data[\"algo\"].isin(interested_exp_names)]\n if len(plot_data) == 0:\n continue\n ax = sns.lineplot(\n data=plot_data, x=\"global_step\", y=args.feature_of_interest, hue=\"algo\", ci=\"sd\", palette=current_palette_dict\n )\n ax.ticklabel_format(style=\"sci\", scilimits=(0, 0), axis=\"x\")\n ax.set(xlabel=args.x_label, ylabel=args.y_label)\n ax.legend().remove()\n if args.y_lim_bottom:\n plt.ylim(bottom=args.y_lim_bottom)\n plt.title(env)\n plt.tight_layout()\n plt.savefig(f\"{feature_name}/plots/{env}.{args.output_format}\")\n plt.clf()\n\n env_algos = data[\"algo\"].unique()\n 
for algo in env_algos:\n algo_data = data.loc[data[\"algo\"].isin([algo])]\n last_n_episodes_global_step = sorted(algo_data[\"global_step\"].unique())[-args.last_n_episodes]\n last_n_episodes_features = (\n algo_data[algo_data[\"global_step\"] > last_n_episodes_global_step]\n .groupby([\"seed\"])\n .mean()[args.feature_of_interest]\n )\n\n for item in last_n_episodes_features:\n stats[args.feature_of_interest] += [item]\n if algo in exp_convert_dict:\n stats[\"exp_name\"] += [exp_convert_dict[algo]]\n else:\n stats[\"exp_name\"] += [algo]\n stats[\"env_id\"] += [env]\n\n # export legend\n # legend_df = pd.DataFrame()\n # legend_df = legend_df.append(plot_data)\n # legend_df = legend_df.reset_index()\n # ax = sns.lineplot(data=legend_df, x=\"global_step\", y=args.feature_of_interest, hue=\"algo\", ci='sd', palette=current_palette_dict)\n # ax.set(xlabel=args.x_label, ylabel=args.y_label)\n # ax.legend().remove()\n # export_legend(ax, f\"{feature_name}/legends/{env}.{args.output_format}\")\n # plt.clf()\n\n # hack\n algo_in_legend = exp_convert_dict[plot_data[\"algo\"].iloc[0]]\n if algo_in_legend not in algos_in_legend:\n legend_df = legend_df.append(plot_data.iloc[:5])\n algos_in_legend += [algo_in_legend]\n\nlegend_df = legend_df.reset_index()\nax = sns.lineplot(\n data=legend_df, x=\"global_step\", y=args.feature_of_interest, hue=\"algo\", ci=\"sd\", palette=current_palette_dict\n)\nax.set(xlabel=args.x_label, ylabel=args.y_label)\nax.legend().remove()\nexport_legend(ax, f\"{feature_name}/legend.{args.output_format}\")\nplt.clf()\n\n\n# analysis\nstats_df = pd.DataFrame(stats)\ng = stats_df.groupby([\"env_id\", \"exp_name\"]).agg(lambda x: f\"{np.mean(x):.2f} ± {np.std(x):.2f}\")\nprint(g.reset_index().pivot(\"exp_name\", \"env_id\", args.feature_of_interest).to_latex().replace(\"±\", \"$\\pm$\"))\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "numpy.mean", "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.tight_layout", "pandas.concat", "matplotlib.pyplot.clf" ] ]
Mars-Rover-Manipal/Active-Suspension
[ "4a73422def896f516b40616f169aae3923f221e1" ]
[ "gym-gazebo/gym_gazebo/envs/lsd_rover/lsd_rover_active_suspension.py" ]
[ "from math import degrees, inf, radians\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import Float64\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import Range\nfrom geometry_msgs.msg import Twist\nfrom gazebo_msgs.msg import LinkStates\nfrom std_srvs.srv import Empty\nfrom gym import utils, spaces\nfrom tf.transformations import euler_from_quaternion\nfrom gym_gazebo.envs import gazebo_env\nfrom sensor_msgs.msg import Imu, LaserScan\nimport rospkg\nfrom gazebo_msgs.msg import ModelState\nfrom gazebo_msgs.srv import SetModelState\nimport time\nimport tf2_ros\nimport tf2_geometry_msgs\nfrom tf2_geometry_msgs import PoseStamped\nfrom random import randint\nimport torch\nfrom gazebo_msgs.msg import ModelStates\nrospack = rospkg.RosPack()\n\n\nclass LsdEnv(gazebo_env.GazeboEnv):\n def __init__(self):\n gazebo_env.GazeboEnv.__init__(self, \"custom_world.launch\")\n\n self.pitch = 0\n self.chassis_rise = 0.0\n self.counter=0\n self.roll = 0\n self.yaw = 0\n self.centroid = 0\n self.y_displacement = 0\n self.x_displacement = 0\n self.ground_clearance = 0\n self.reward = 0\n self.observation_space = spaces.Box(low=-50, high=50, shape=(4,), dtype=np.float32)\n self.orientation_list = []\n self.action_space = spaces.Box(low=-1, high=1, shape=(4,), dtype=np.float32)\n self.obstacle_distance = 0\n self.obstacle_height = 0\n self.step_height=0\n self.obstacle_offset = 0\n self.chassis_angle = 0\n self.actual_speed = 0\n self.done = False\n self.package_path = rospack.get_path('lsd')\n\n rospy.Subscriber(\"/imu\", Imu, self.callback_imu)\n\n rospy.Subscriber(\"/odom\", Odometry, self.callback_pose)\n\n rospy.Subscriber(\"/gazebo/model_states\", ModelStates, self.callback_chassis_rise)\n\n self.velocity_publisher = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=10)\n\n self.fl_joint_pub = rospy.Publisher(\"/lsd/fl_joint_position_controller/command\",\n Float64, queue_size=10)\n self.fr_joint_pub = rospy.Publisher(\"/lsd/fr_joint_position_controller/command\",\n Float64, queue_size=10)\n self.bl_joint_pub = rospy.Publisher(\"/lsd/bl_joint_position_controller/command\",\n Float64, queue_size=10)\n self.br_joint_pub = rospy.Publisher(\"/lsd/br_joint_position_controller/command\",\n Float64, queue_size=10)\n\n self.pause = rospy.ServiceProxy(\"/gazebo/pause_physics\", Empty)\n self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)\n rospy.Subscriber(\"/centroid_point\", PoseStamped, self.callback_point)\n self.rate = rospy.Rate(50)\n\n def forward(self):\n\n vel_cmd = Twist()\n vel_cmd.linear.x = -0.7\n vel_cmd.linear.y = 0\n vel_cmd.angular.z = 0\n\n self.velocity_publisher.publish(vel_cmd)\n\n def teleport(self):\n\n state_msg = ModelState()\n state_msg.model_name = 'lsd'\n state_msg.pose.position.x = 0\n state_msg.pose.position.y = 0\n state_msg.pose.position.z = 0.5\n state_msg.pose.orientation.x = 0\n state_msg.pose.orientation.y = 0\n state_msg.pose.orientation.z = 0\n state_msg.pose.orientation.w = 1\n\n step = ModelState()\n self.step_height = randint(25, 32)\n print(\"\\033[0;32m\\nOBSTACLE HEIGHT = %scm\\033[0m\" % self.step_height)\n\n step.model_name = 'step1'\n step.pose.position.x = 5\n step.pose.position.y = 0\n step.pose.position.z = (float(self.step_height) / 100.) 
- 0.16\n step.pose.orientation.x = 0\n step.pose.orientation.y = 0\n step.pose.orientation.z = 0\n step.pose.orientation.w = 1\n\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n set_state(state_msg)\n set_state(step)\n\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n def callback_imu(self, msg):\n\n self.orientation_list = [msg.orientation.x, msg.orientation.y, msg.orientation.z,\n msg.orientation.w]\n (self.roll, self.pitch, self.yaw) = euler_from_quaternion(self.orientation_list)\n self.pitch = degrees(self.pitch)\n self.roll = degrees(self.roll)\n self.yaw = degrees(self.yaw)\n self.counter+=1\n\n def callback_chassis_rise(self, msg):\n try:\n self.chassis_rise = msg.pose[msg.name.index('lsd')].position.z\n # print(self.chassis_rise)\n \n except ValueError:\n pass\n\n def callback_point(self, msg):\n self.centroid = msg\n\n def callback_pose(self, msg):\n self.actual_speed = msg.twist.twist.linear.x\n self.y_displacement = msg.pose.pose.position.y\n self.x_displacement = msg.pose.pose.position.x\n\n def descretize_func(self, tu):\n for i in range(len(tu)):\n tu[i] = round(tu[i], 1)\n\n return tu\n\n def get_observation(self):\n\n observation = [self.pitch, self.roll, self.x_displacement, self.step_height]\n return observation\n\n def step(self, action):\n\n rospy.wait_for_service('/gazebo/unpause_physics')\n try:\n self.unpause()\n except rospy.ServiceException:\n print(\"/gazebo/unpause_physics service call failed\")\n\n self.forward()\n\n if 2.9< self.x_displacement <3.3:\n\n\n action[2]=action[2]*37\n action[3]=action[3]*37\n\n self.fl_joint_pub.publish(radians(0))\n self.fr_joint_pub.publish(radians(0))\n self.bl_joint_pub.publish(radians(abs(action[2])))\n self.br_joint_pub.publish(radians(abs(action[3])))\n\n time.sleep(0.5)\n\n time.sleep(5)\n\n action[0] = -abs(action[0]*37)\n action[1] = -abs(action[1]*37)\n\n self.fl_joint_pub.publish(radians(0))\n self.fr_joint_pub.publish(radians(0))\n self.bl_joint_pub.publish(radians(action[0]))\n self.br_joint_pub.publish(radians(action[1]))\n\n time.sleep(0.5)\n\n time.sleep(3)\n\n self.fl_joint_pub.publish(radians(10))\n self.fr_joint_pub.publish(radians(10))\n self.bl_joint_pub.publish(radians(0))\n self.br_joint_pub.publish(radians(0))\n\n time.sleep(2)\n\n self.fl_joint_pub.publish(radians(0))\n self.fr_joint_pub.publish(radians(0))\n self.bl_joint_pub.publish(radians(0))\n self.br_joint_pub.publish(radians(0))\n\n time.sleep(1)\n\n\n observation_ = self.get_observation()\n\n self.get_reward()\n # print(np.array(observation_,dtype=np.float32), self.reward, self.done)\n return np.array(observation_), self.reward, self.done, {}\n\n def get_reward(self):\n if abs(self.pitch) > 20:\n self.done = True\n self.reward = -100\n if abs(self.yaw)>10:\n self.reward=-100\n if abs(self.x_displacement)>3.6:\n self.reward= 100\n self.done=True\n if(self.counter>430 and self.x_displacement<3.3):\n self.reward=-50\n self.done=True\n if(self.counter>430):\n self.done=True\n\n\n\n def reset(self):\n\n self.done = False\n self.reward=0\n self.counter=0\n self.teleport()\n vel_cmd = Twist()\n vel_cmd.linear.x = 0\n vel_cmd.angular.z = 0\n\n self.velocity_publisher.publish(vel_cmd)\n\n self.fl_joint_pub.publish(0)\n self.fr_joint_pub.publish(0)\n self.bl_joint_pub.publish(0)\n self.br_joint_pub.publish(0)\n\n time.sleep(2)\n\n # unpause simulation to make an observation and reset the values\n 
rospy.wait_for_service('/gazebo/unpause_physics')\n try:\n # resp_pause = pause.call()\n self.unpause()\n except rospy.ServiceException:\n print(\"/gazebo/unpause_physics service call failed\")\n\n initial_reading = self.get_observation()\n\n rospy.wait_for_service('/gazebo/pause_physics')\n try:\n self.pause()\n except rospy.ServiceException:\n print(\"/gazebo/pause_physics service call failed\")\n\n return np.array(initial_reading, dtype=np.float32)" ]
[ [ "numpy.array" ] ]
ranocha/BSeries
[ "de42bf03280ef7860ae6ade1d20212906b37a3d4" ]
[ "BSeries/util.py" ]
[ "import numpy as np\n\ndef object_einsum(string, *arrays):\n \"\"\"\n Simplified object einsum, without much error checking.\n Stolen from https://gist.github.com/seberg/5236560, with some modifications.\n We can remove this and rely directly on np.einsum after the related\n PR is merged to numpy.\n \"\"\"\n import copy\n try:\n return np.einsum(string, *arrays)\n except TypeError:\n pass\n \n s = string.split('->')\n in_op = s[0].split(',')\n out_op = None if len(s) == 1 else s[1].replace(' ', '')\n\n in_op = [axes.replace(' ', '') for axes in in_op]\n all_axes = set()\n repeated_axes = set()\n \n for axes in in_op:\n list(repeated_axes.update(ax) for ax in axes if ax in all_axes)\n all_axes.update(axes)\n\n if out_op is None:\n out_op = set(sorted(all_axes))\n list(out_op.discard(rep_ax) for rep_ax in repeated_axes)\n else:\n all_axes.update(out_op)\n \n perm_dict = {_[1]: _[0] for _ in enumerate(all_axes)}\n \n dims = len(perm_dict)\n op_axes = []\n for axes in (in_op + list((out_op,))):\n op = [-1] * dims\n for i, ax in enumerate(axes):\n op[perm_dict[ax]] = i\n op_axes.append(op)\n \n op_flags = [('readonly',)] * len(in_op) + [('readwrite', 'allocate')]\n dtypes = [np.object_] * (len(in_op) + 1) # cast all to object\n\n nditer = np.nditer(arrays + (None,), op_axes=op_axes, flags=['buffered', 'delay_bufalloc', 'reduce_ok', 'grow_inner', 'refs_ok'], op_dtypes=dtypes, op_flags=op_flags)\n\n nditer.operands[-1][...] = 0\n nditer.reset()\n \n for vals in nditer:\n out = vals[-1]\n prod = copy.deepcopy(vals[0])\n #prod = vals[0]\n for value in vals[1:-1]:\n prod *= value\n out += prod\n \n return nditer.operands[-1]\n\n\n" ]
[ [ "numpy.nditer", "numpy.einsum" ] ]
nikkigabbard/sympy
[ "1819379bdcca733eabc635ca4b3c9ae3deff1205" ]
[ "sympy/matrices/dense.py" ]
[ "from __future__ import division, print_function\n\nimport random\n\nfrom sympy.core import SympifyError\nfrom sympy.core.basic import Basic\nfrom sympy.core.compatibility import is_sequence, range, reduce\nfrom sympy.core.expr import Expr\nfrom sympy.core.function import count_ops, expand_mul\nfrom sympy.core.singleton import S\nfrom sympy.core.symbol import Symbol\nfrom sympy.core.sympify import sympify\nfrom sympy.functions.elementary.miscellaneous import sqrt\nfrom sympy.functions.elementary.trigonometric import cos, sin\nfrom sympy.matrices.common import a2idx, classof\nfrom sympy.matrices.matrices import MatrixBase, ShapeError\nfrom sympy.simplify import simplify as _simplify\nfrom sympy.utilities.decorator import doctest_depends_on\nfrom sympy.utilities.misc import filldedent\n\n\ndef _iszero(x):\n \"\"\"Returns True if x is zero.\"\"\"\n return x.is_zero\n\n\ndef _compare_sequence(a, b):\n \"\"\"Compares the elements of a list/tuple `a`\n and a list/tuple `b`. `_compare_sequence((1,2), [1, 2])`\n is True, whereas `(1,2) == [1, 2]` is False\"\"\"\n if type(a) is type(b):\n # if they are the same type, compare directly\n return a == b\n # there is no overhead for calling `tuple` on a\n # tuple\n return tuple(a) == tuple(b)\n\nclass DenseMatrix(MatrixBase):\n\n is_MatrixExpr = False\n\n _op_priority = 10.01\n _class_priority = 4\n\n def __eq__(self, other):\n other = sympify(other)\n self_shape = getattr(self, 'shape', None)\n other_shape = getattr(other, 'shape', None)\n if None in (self_shape, other_shape):\n return False\n if self_shape != other_shape:\n return False\n if isinstance(other, Matrix):\n return _compare_sequence(self._mat, other._mat)\n elif isinstance(other, MatrixBase):\n return _compare_sequence(self._mat, Matrix(other)._mat)\n\n def __getitem__(self, key):\n \"\"\"Return portion of self defined by key. If the key involves a slice\n then a list will be returned (if key is a single slice) or a matrix\n (if key was a tuple involving a slice).\n\n Examples\n ========\n\n >>> from sympy import Matrix, I\n >>> m = Matrix([\n ... [1, 2 + I],\n ... [3, 4 ]])\n\n If the key is a tuple that doesn't involve a slice then that element\n is returned:\n\n >>> m[1, 0]\n 3\n\n When a tuple key involves a slice, a matrix is returned. 
Here, the\n first column is selected (all rows, column 0):\n\n >>> m[:, 0]\n Matrix([\n [1],\n [3]])\n\n If the slice is not a tuple then it selects from the underlying\n list of elements that are arranged in row order and a list is\n returned if a slice is involved:\n\n >>> m[0]\n 1\n >>> m[::2]\n [1, 3]\n \"\"\"\n if isinstance(key, tuple):\n i, j = key\n try:\n i, j = self.key2ij(key)\n return self._mat[i*self.cols + j]\n except (TypeError, IndexError):\n if (isinstance(i, Expr) and not i.is_number) or (isinstance(j, Expr) and not j.is_number):\n if ((j < 0) is True) or ((j >= self.shape[1]) is True) or\\\n ((i < 0) is True) or ((i >= self.shape[0]) is True):\n raise ValueError(\"index out of boundary\")\n from sympy.matrices.expressions.matexpr import MatrixElement\n return MatrixElement(self, i, j)\n\n if isinstance(i, slice):\n # XXX remove list() when PY2 support is dropped\n i = list(range(self.rows))[i]\n elif is_sequence(i):\n pass\n else:\n i = [i]\n if isinstance(j, slice):\n # XXX remove list() when PY2 support is dropped\n j = list(range(self.cols))[j]\n elif is_sequence(j):\n pass\n else:\n j = [j]\n return self.extract(i, j)\n else:\n # row-wise decomposition of matrix\n if isinstance(key, slice):\n return self._mat[key]\n return self._mat[a2idx(key)]\n\n def __setitem__(self, key, value):\n raise NotImplementedError()\n\n def _cholesky(self, hermitian=True):\n \"\"\"Helper function of cholesky.\n Without the error checks.\n To be used privately.\n Implements the Cholesky-Banachiewicz algorithm.\n Returns L such that L*L.H == self if hermitian flag is True,\n or L*L.T == self if hermitian is False.\n \"\"\"\n L = zeros(self.rows, self.rows)\n if hermitian:\n for i in range(self.rows):\n for j in range(i):\n L[i, j] = (1 / L[j, j])*expand_mul(self[i, j] -\n sum(L[i, k]*L[j, k].conjugate() for k in range(j)))\n Lii2 = expand_mul(self[i, i] -\n sum(L[i, k]*L[i, k].conjugate() for k in range(i)))\n if Lii2.is_positive is False:\n raise ValueError(\"Matrix must be positive-definite\")\n L[i, i] = sqrt(Lii2)\n else:\n for i in range(self.rows):\n for j in range(i):\n L[i, j] = (1 / L[j, j])*(self[i, j] -\n sum(L[i, k]*L[j, k] for k in range(j)))\n L[i, i] = sqrt(self[i, i] -\n sum(L[i, k]**2 for k in range(i)))\n return self._new(L)\n\n def _diagonal_solve(self, rhs):\n \"\"\"Helper function of function diagonal_solve,\n without the error checks, to be used privately.\n \"\"\"\n return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])\n\n def _eval_add(self, other):\n # we assume both arguments are dense matrices since\n # sparse matrices have a higher priority\n mat = [a + b for a,b in zip(self._mat, other._mat)]\n return classof(self, other)._new(self.rows, self.cols, mat, copy=False)\n\n def _eval_extract(self, rowsList, colsList):\n mat = self._mat\n cols = self.cols\n indices = (i * cols + j for i in rowsList for j in colsList)\n return self._new(len(rowsList), len(colsList),\n list(mat[i] for i in indices), copy=False)\n\n def _eval_matrix_mul(self, other):\n from sympy import Add\n # cache attributes for faster access\n self_rows, self_cols = self.rows, self.cols\n other_rows, other_cols = other.rows, other.cols\n other_len = other_rows * other_cols\n new_mat_rows = self.rows\n new_mat_cols = other.cols\n\n # preallocate the array\n new_mat = [S.Zero]*new_mat_rows*new_mat_cols\n\n # if we multiply an n x 0 with a 0 x m, the\n # expected behavior is to produce an n x m matrix of zeros\n if self.cols != 0 and other.rows != 0:\n # cache self._mat and 
other._mat for performance\n mat = self._mat\n other_mat = other._mat\n for i in range(len(new_mat)):\n row, col = i // new_mat_cols, i % new_mat_cols\n row_indices = range(self_cols*row, self_cols*(row+1))\n col_indices = range(col, other_len, other_cols)\n vec = (mat[a]*other_mat[b] for a,b in zip(row_indices, col_indices))\n try:\n new_mat[i] = Add(*vec)\n except (TypeError, SympifyError):\n # Block matrices don't work with `sum` or `Add` (ISSUE #11599)\n # They don't work with `sum` because `sum` tries to add `0`\n # initially, and for a matrix, that is a mix of a scalar and\n # a matrix, which raises a TypeError. Fall back to a\n # block-matrix-safe way to multiply if the `sum` fails.\n vec = (mat[a]*other_mat[b] for a,b in zip(row_indices, col_indices))\n new_mat[i] = reduce(lambda a,b: a + b, vec)\n return classof(self, other)._new(new_mat_rows, new_mat_cols, new_mat, copy=False)\n\n def _eval_matrix_mul_elementwise(self, other):\n mat = [a*b for a,b in zip(self._mat, other._mat)]\n return classof(self, other)._new(self.rows, self.cols, mat, copy=False)\n\n def _eval_inverse(self, **kwargs):\n \"\"\"Return the matrix inverse using the method indicated (default\n is Gauss elimination).\n\n kwargs\n ======\n\n method : ('GE', 'LU', or 'ADJ')\n iszerofunc\n try_block_diag\n\n Notes\n =====\n\n According to the ``method`` keyword, it calls the appropriate method:\n\n GE .... inverse_GE(); default\n LU .... inverse_LU()\n ADJ ... inverse_ADJ()\n\n According to the ``try_block_diag`` keyword, it will try to form block\n diagonal matrices using the method get_diag_blocks(), invert these\n individually, and then reconstruct the full inverse matrix.\n\n Note, the GE and LU methods may require the matrix to be simplified\n before it is inverted in order to properly detect zeros during\n pivoting. In difficult cases a custom zero detection function can\n be provided by setting the ``iszerosfunc`` argument to a function that\n should return True if its argument is zero. 
The ADJ routine computes\n the determinant and uses that to detect singular matrices in addition\n to testing for zeros on the diagonal.\n\n See Also\n ========\n\n inverse_LU\n inverse_GE\n inverse_ADJ\n \"\"\"\n from sympy.matrices import diag\n\n method = kwargs.get('method', 'GE')\n iszerofunc = kwargs.get('iszerofunc', _iszero)\n if kwargs.get('try_block_diag', False):\n blocks = self.get_diag_blocks()\n r = []\n for block in blocks:\n r.append(block.inv(method=method, iszerofunc=iszerofunc))\n return diag(*r)\n\n M = self.as_mutable()\n if method == \"GE\":\n rv = M.inverse_GE(iszerofunc=iszerofunc)\n elif method == \"LU\":\n rv = M.inverse_LU(iszerofunc=iszerofunc)\n elif method == \"ADJ\":\n rv = M.inverse_ADJ(iszerofunc=iszerofunc)\n else:\n # make sure to add an invertibility check (as in inverse_LU)\n # if a new method is added.\n raise ValueError(\"Inversion method unrecognized\")\n return self._new(rv)\n\n def _eval_scalar_mul(self, other):\n mat = [other*a for a in self._mat]\n return self._new(self.rows, self.cols, mat, copy=False)\n\n def _eval_scalar_rmul(self, other):\n mat = [a*other for a in self._mat]\n return self._new(self.rows, self.cols, mat, copy=False)\n\n def _eval_tolist(self):\n mat = list(self._mat)\n cols = self.cols\n return [mat[i*cols:(i + 1)*cols] for i in range(self.rows)]\n\n def _LDLdecomposition(self, hermitian=True):\n \"\"\"Helper function of LDLdecomposition.\n Without the error checks.\n To be used privately.\n Returns L and D such that L*D*L.H == self if hermitian flag is True,\n or L*D*L.T == self if hermitian is False.\n \"\"\"\n # https://en.wikipedia.org/wiki/Cholesky_decomposition#LDL_decomposition_2\n D = zeros(self.rows, self.rows)\n L = eye(self.rows)\n if hermitian:\n for i in range(self.rows):\n for j in range(i):\n L[i, j] = (1 / D[j, j])*expand_mul(self[i, j] - sum(\n L[i, k]*L[j, k].conjugate()*D[k, k] for k in range(j)))\n D[i, i] = expand_mul(self[i, i] -\n sum(L[i, k]*L[i, k].conjugate()*D[k, k] for k in range(i)))\n if D[i, i].is_positive is False:\n raise ValueError(\"Matrix must be positive-definite\")\n else:\n for i in range(self.rows):\n for j in range(i):\n L[i, j] = (1 / D[j, j])*(self[i, j] - sum(\n L[i, k]*L[j, k]*D[k, k] for k in range(j)))\n D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k] for k in range(i))\n return self._new(L), self._new(D)\n\n def _lower_triangular_solve(self, rhs):\n \"\"\"Helper function of function lower_triangular_solve.\n Without the error checks.\n To be used privately.\n \"\"\"\n X = zeros(self.rows, rhs.cols)\n for j in range(rhs.cols):\n for i in range(self.rows):\n if self[i, i] == 0:\n raise TypeError(\"Matrix must be non-singular.\")\n X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]\n for k in range(i))) / self[i, i]\n return self._new(X)\n\n def _upper_triangular_solve(self, rhs):\n \"\"\"Helper function of function upper_triangular_solve.\n Without the error checks, to be used privately. 
\"\"\"\n X = zeros(self.rows, rhs.cols)\n for j in range(rhs.cols):\n for i in reversed(range(self.rows)):\n if self[i, i] == 0:\n raise ValueError(\"Matrix must be non-singular.\")\n X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]\n for k in range(i + 1, self.rows))) / self[i, i]\n return self._new(X)\n\n def as_immutable(self):\n \"\"\"Returns an Immutable version of this Matrix\n \"\"\"\n from .immutable import ImmutableDenseMatrix as cls\n if self.rows and self.cols:\n return cls._new(self.tolist())\n return cls._new(self.rows, self.cols, [])\n\n def as_mutable(self):\n \"\"\"Returns a mutable version of this matrix\n\n Examples\n ========\n\n >>> from sympy import ImmutableMatrix\n >>> X = ImmutableMatrix([[1, 2], [3, 4]])\n >>> Y = X.as_mutable()\n >>> Y[1, 1] = 5 # Can set values in Y\n >>> Y\n Matrix([\n [1, 2],\n [3, 5]])\n \"\"\"\n return Matrix(self)\n\n def equals(self, other, failing_expression=False):\n \"\"\"Applies ``equals`` to corresponding elements of the matrices,\n trying to prove that the elements are equivalent, returning True\n if they are, False if any pair is not, and None (or the first\n failing expression if failing_expression is True) if it cannot\n be decided if the expressions are equivalent or not. This is, in\n general, an expensive operation.\n\n Examples\n ========\n\n >>> from sympy.matrices import Matrix\n >>> from sympy.abc import x\n >>> from sympy import cos\n >>> A = Matrix([x*(x - 1), 0])\n >>> B = Matrix([x**2 - x, 0])\n >>> A == B\n False\n >>> A.simplify() == B.simplify()\n True\n >>> A.equals(B)\n True\n >>> A.equals(2)\n False\n\n See Also\n ========\n sympy.core.expr.equals\n \"\"\"\n self_shape = getattr(self, 'shape', None)\n other_shape = getattr(other, 'shape', None)\n if None in (self_shape, other_shape):\n return False\n if self_shape != other_shape:\n return False\n rv = True\n for i in range(self.rows):\n for j in range(self.cols):\n ans = self[i, j].equals(other[i, j], failing_expression)\n if ans is False:\n return False\n elif ans is not True and rv is True:\n rv = ans\n return rv\n\n\ndef _force_mutable(x):\n \"\"\"Return a matrix as a Matrix, otherwise return x.\"\"\"\n if getattr(x, 'is_Matrix', False):\n return x.as_mutable()\n elif isinstance(x, Basic):\n return x\n elif hasattr(x, '__array__'):\n a = x.__array__()\n if len(a.shape) == 0:\n return sympify(a)\n return Matrix(x)\n return x\n\n\nclass MutableDenseMatrix(DenseMatrix, MatrixBase):\n def __new__(cls, *args, **kwargs):\n return cls._new(*args, **kwargs)\n\n @classmethod\n def _new(cls, *args, **kwargs):\n # if the `copy` flag is set to False, the input\n # was rows, cols, [list]. 
It should be used directly\n # without creating a copy.\n if kwargs.get('copy', True) is False:\n if len(args) != 3:\n raise TypeError(\"'copy=False' requires a matrix be initialized as rows,cols,[list]\")\n rows, cols, flat_list = args\n else:\n rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)\n flat_list = list(flat_list) # create a shallow copy\n self = object.__new__(cls)\n self.rows = rows\n self.cols = cols\n self._mat = flat_list\n return self\n\n def __setitem__(self, key, value):\n \"\"\"\n\n Examples\n ========\n\n >>> from sympy import Matrix, I, zeros, ones\n >>> m = Matrix(((1, 2+I), (3, 4)))\n >>> m\n Matrix([\n [1, 2 + I],\n [3, 4]])\n >>> m[1, 0] = 9\n >>> m\n Matrix([\n [1, 2 + I],\n [9, 4]])\n >>> m[1, 0] = [[0, 1]]\n\n To replace row r you assign to position r*m where m\n is the number of columns:\n\n >>> M = zeros(4)\n >>> m = M.cols\n >>> M[3*m] = ones(1, m)*2; M\n Matrix([\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [2, 2, 2, 2]])\n\n And to replace column c you can assign to position c:\n\n >>> M[2] = ones(m, 1)*4; M\n Matrix([\n [0, 0, 4, 0],\n [0, 0, 4, 0],\n [0, 0, 4, 0],\n [2, 2, 4, 2]])\n \"\"\"\n rv = self._setitem(key, value)\n if rv is not None:\n i, j, value = rv\n self._mat[i*self.cols + j] = value\n\n def as_mutable(self):\n return self.copy()\n\n def col_del(self, i):\n \"\"\"Delete the given column.\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> M = eye(3)\n >>> M.col_del(1)\n >>> M\n Matrix([\n [1, 0],\n [0, 0],\n [0, 1]])\n\n See Also\n ========\n\n col\n row_del\n \"\"\"\n if i < -self.cols or i >= self.cols:\n raise IndexError(\"Index out of range: 'i=%s', valid -%s <= i < %s\"\n % (i, self.cols, self.cols))\n for j in range(self.rows - 1, -1, -1):\n del self._mat[i + j*self.cols]\n self.cols -= 1\n\n def col_op(self, j, f):\n \"\"\"In-place operation on col j using two-arg functor whose args are\n interpreted as (self[i, j], i).\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> M = eye(3)\n >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M\n Matrix([\n [1, 2, 0],\n [0, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n col\n row_op\n \"\"\"\n self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))]\n\n def col_swap(self, i, j):\n \"\"\"Swap the two given columns of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy.matrices import Matrix\n >>> M = Matrix([[1, 0], [1, 0]])\n >>> M\n Matrix([\n [1, 0],\n [1, 0]])\n >>> M.col_swap(0, 1)\n >>> M\n Matrix([\n [0, 1],\n [0, 1]])\n\n See Also\n ========\n\n col\n row_swap\n \"\"\"\n for k in range(0, self.rows):\n self[k, i], self[k, j] = self[k, j], self[k, i]\n\n def copyin_list(self, key, value):\n \"\"\"Copy in elements from a list.\n\n Parameters\n ==========\n\n key : slice\n The section of this matrix to replace.\n value : iterable\n The iterable to copy values from.\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> I = eye(3)\n >>> I[:2, 0] = [1, 2] # col\n >>> I\n Matrix([\n [1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n >>> I[1, :2] = [[3, 4]]\n >>> I\n Matrix([\n [1, 0, 0],\n [3, 4, 0],\n [0, 0, 1]])\n\n See Also\n ========\n\n copyin_matrix\n \"\"\"\n if not is_sequence(value):\n raise TypeError(\"`value` must be an ordered iterable, not %s.\" % type(value))\n return self.copyin_matrix(key, Matrix(value))\n\n def copyin_matrix(self, key, value):\n \"\"\"Copy in values from a matrix into the given bounds.\n\n Parameters\n ==========\n\n key : slice\n The section of 
this matrix to replace.\n value : Matrix\n The matrix to copy values from.\n\n Examples\n ========\n\n >>> from sympy.matrices import Matrix, eye\n >>> M = Matrix([[0, 1], [2, 3], [4, 5]])\n >>> I = eye(3)\n >>> I[:3, :2] = M\n >>> I\n Matrix([\n [0, 1, 0],\n [2, 3, 0],\n [4, 5, 1]])\n >>> I[0, 1] = M\n >>> I\n Matrix([\n [0, 0, 1],\n [2, 2, 3],\n [4, 4, 5]])\n\n See Also\n ========\n\n copyin_list\n \"\"\"\n rlo, rhi, clo, chi = self.key2bounds(key)\n shape = value.shape\n dr, dc = rhi - rlo, chi - clo\n if shape != (dr, dc):\n raise ShapeError(filldedent(\"The Matrix `value` doesn't have the \"\n \"same dimensions \"\n \"as the in sub-Matrix given by `key`.\"))\n\n for i in range(value.rows):\n for j in range(value.cols):\n self[i + rlo, j + clo] = value[i, j]\n\n def fill(self, value):\n \"\"\"Fill the matrix with the scalar value.\n\n See Also\n ========\n\n zeros\n ones\n \"\"\"\n self._mat = [value]*len(self)\n\n def row_del(self, i):\n \"\"\"Delete the given row.\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> M = eye(3)\n >>> M.row_del(1)\n >>> M\n Matrix([\n [1, 0, 0],\n [0, 0, 1]])\n\n See Also\n ========\n\n row\n col_del\n \"\"\"\n if i < -self.rows or i >= self.rows:\n raise IndexError(\"Index out of range: 'i = %s', valid -%s <= i\"\n \" < %s\" % (i, self.rows, self.rows))\n if i < 0:\n i += self.rows\n del self._mat[i*self.cols:(i+1)*self.cols]\n self.rows -= 1\n\n def row_op(self, i, f):\n \"\"\"In-place operation on row ``i`` using two-arg functor whose args are\n interpreted as ``(self[i, j], j)``.\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> M = eye(3)\n >>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M\n Matrix([\n [1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n row\n zip_row_op\n col_op\n\n \"\"\"\n i0 = i*self.cols\n ri = self._mat[i0: i0 + self.cols]\n self._mat[i0: i0 + self.cols] = [f(x, j) for x, j in zip(ri, list(range(self.cols)))]\n\n def row_swap(self, i, j):\n \"\"\"Swap the two given rows of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy.matrices import Matrix\n >>> M = Matrix([[0, 1], [1, 0]])\n >>> M\n Matrix([\n [0, 1],\n [1, 0]])\n >>> M.row_swap(0, 1)\n >>> M\n Matrix([\n [1, 0],\n [0, 1]])\n\n See Also\n ========\n\n row\n col_swap\n \"\"\"\n for k in range(0, self.cols):\n self[i, k], self[j, k] = self[j, k], self[i, k]\n\n def simplify(self, ratio=1.7, measure=count_ops, rational=False, inverse=False):\n \"\"\"Applies simplify to the elements of a matrix in place.\n\n This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))\n\n See Also\n ========\n\n sympy.simplify.simplify.simplify\n \"\"\"\n for i in range(len(self._mat)):\n self._mat[i] = _simplify(self._mat[i], ratio=ratio, measure=measure,\n rational=rational, inverse=inverse)\n\n def zip_row_op(self, i, k, f):\n \"\"\"In-place operation on row ``i`` using two-arg functor whose args are\n interpreted as ``(self[i, j], self[k, j])``.\n\n Examples\n ========\n\n >>> from sympy.matrices import eye\n >>> M = eye(3)\n >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M\n Matrix([\n [1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n row\n row_op\n col_op\n\n \"\"\"\n i0 = i*self.cols\n k0 = k*self.cols\n\n ri = self._mat[i0: i0 + self.cols]\n rk = self._mat[k0: k0 + self.cols]\n\n self._mat[i0: i0 + self.cols] = [f(x, y) for x, y in zip(ri, rk)]\n\n # Utility functions\n\nMutableMatrix = Matrix = MutableDenseMatrix\n\n###########\n# Numpy Utility Functions:\n# list2numpy, matrix2numpy, symmarray, 
rot_axis[123]\n###########\n\n\ndef list2numpy(l, dtype=object): # pragma: no cover\n \"\"\"Converts python list of SymPy expressions to a NumPy array.\n\n See Also\n ========\n\n matrix2numpy\n \"\"\"\n from numpy import empty\n a = empty(len(l), dtype)\n for i, s in enumerate(l):\n a[i] = s\n return a\n\n\ndef matrix2numpy(m, dtype=object): # pragma: no cover\n \"\"\"Converts SymPy's matrix to a NumPy array.\n\n See Also\n ========\n\n list2numpy\n \"\"\"\n from numpy import empty\n a = empty(m.shape, dtype)\n for i in range(m.rows):\n for j in range(m.cols):\n a[i, j] = m[i, j]\n return a\n\n\ndef rot_axis3(theta):\n \"\"\"Returns a rotation matrix for a rotation of theta (in radians) about\n the 3-axis.\n\n Examples\n ========\n\n >>> from sympy import pi\n >>> from sympy.matrices import rot_axis3\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis3(theta)\n Matrix([\n [ 1/2, sqrt(3)/2, 0],\n [-sqrt(3)/2, 1/2, 0],\n [ 0, 0, 1]])\n\n If we rotate by pi/2 (90 degrees):\n\n >>> rot_axis3(pi/2)\n Matrix([\n [ 0, 1, 0],\n [-1, 0, 0],\n [ 0, 0, 1]])\n\n See Also\n ========\n\n rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)\n about the 1-axis\n rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)\n about the 2-axis\n \"\"\"\n ct = cos(theta)\n st = sin(theta)\n lil = ((ct, st, 0),\n (-st, ct, 0),\n (0, 0, 1))\n return Matrix(lil)\n\n\ndef rot_axis2(theta):\n \"\"\"Returns a rotation matrix for a rotation of theta (in radians) about\n the 2-axis.\n\n Examples\n ========\n\n >>> from sympy import pi\n >>> from sympy.matrices import rot_axis2\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis2(theta)\n Matrix([\n [ 1/2, 0, -sqrt(3)/2],\n [ 0, 1, 0],\n [sqrt(3)/2, 0, 1/2]])\n\n If we rotate by pi/2 (90 degrees):\n\n >>> rot_axis2(pi/2)\n Matrix([\n [0, 0, -1],\n [0, 1, 0],\n [1, 0, 0]])\n\n See Also\n ========\n\n rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)\n about the 1-axis\n rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)\n about the 3-axis\n \"\"\"\n ct = cos(theta)\n st = sin(theta)\n lil = ((ct, 0, -st),\n (0, 1, 0),\n (st, 0, ct))\n return Matrix(lil)\n\n\ndef rot_axis1(theta):\n \"\"\"Returns a rotation matrix for a rotation of theta (in radians) about\n the 1-axis.\n\n Examples\n ========\n\n >>> from sympy import pi\n >>> from sympy.matrices import rot_axis1\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis1(theta)\n Matrix([\n [1, 0, 0],\n [0, 1/2, sqrt(3)/2],\n [0, -sqrt(3)/2, 1/2]])\n\n If we rotate by pi/2 (90 degrees):\n\n >>> rot_axis1(pi/2)\n Matrix([\n [1, 0, 0],\n [0, 0, 1],\n [0, -1, 0]])\n\n See Also\n ========\n\n rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)\n about the 2-axis\n rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)\n about the 3-axis\n \"\"\"\n ct = cos(theta)\n st = sin(theta)\n lil = ((1, 0, 0),\n (0, ct, st),\n (0, -st, ct))\n return Matrix(lil)\n\n\n@doctest_depends_on(modules=('numpy',))\ndef symarray(prefix, shape, **kwargs): # pragma: no cover\n r\"\"\"Create a numpy ndarray of symbols (as an object array).\n\n The created symbols are named ``prefix_i1_i2_``... 
You should thus provide a\n non-empty prefix if you want your symbols to be unique for different output\n arrays, as SymPy symbols with identical names are the same object.\n\n Parameters\n ----------\n\n prefix : string\n A prefix prepended to the name of every symbol.\n\n shape : int or tuple\n Shape of the created array. If an int, the array is one-dimensional; for\n more than one dimension the shape must be a tuple.\n\n \\*\\*kwargs : dict\n keyword arguments passed on to Symbol\n\n Examples\n ========\n These doctests require numpy.\n\n >>> from sympy import symarray\n >>> symarray('', 3)\n [_0 _1 _2]\n\n If you want multiple symarrays to contain distinct symbols, you *must*\n provide unique prefixes:\n\n >>> a = symarray('', 3)\n >>> b = symarray('', 3)\n >>> a[0] == b[0]\n True\n >>> a = symarray('a', 3)\n >>> b = symarray('b', 3)\n >>> a[0] == b[0]\n False\n\n Creating symarrays with a prefix:\n\n >>> symarray('a', 3)\n [a_0 a_1 a_2]\n\n For more than one dimension, the shape must be given as a tuple:\n\n >>> symarray('a', (2, 3))\n [[a_0_0 a_0_1 a_0_2]\n [a_1_0 a_1_1 a_1_2]]\n >>> symarray('a', (2, 3, 2))\n [[[a_0_0_0 a_0_0_1]\n [a_0_1_0 a_0_1_1]\n [a_0_2_0 a_0_2_1]]\n <BLANKLINE>\n [[a_1_0_0 a_1_0_1]\n [a_1_1_0 a_1_1_1]\n [a_1_2_0 a_1_2_1]]]\n\n For setting assumptions of the underlying Symbols:\n\n >>> [s.is_real for s in symarray('a', 2, real=True)]\n [True, True]\n \"\"\"\n from numpy import empty, ndindex\n arr = empty(shape, dtype=object)\n for index in ndindex(shape):\n arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),\n **kwargs)\n return arr\n\n\n###############\n# Functions\n###############\n\ndef casoratian(seqs, n, zero=True):\n \"\"\"Given linear difference operator L of order 'k' and homogeneous\n equation Ly = 0 we want to compute kernel of L, which is a set\n of 'k' sequences: a(n), b(n), ... z(n).\n\n Solutions of L are linearly independent iff their Casoratian,\n denoted as C(a, b, ..., z), do not vanish for n = 0.\n\n Casoratian is defined by k x k determinant::\n\n + a(n) b(n) . . . z(n) +\n | a(n+1) b(n+1) . . . z(n+1) |\n | . . . . |\n | . . . . |\n | . . . . |\n + a(n+k-1) b(n+k-1) . . . z(n+k-1) +\n\n It proves very useful in rsolve_hyper() where it is applied\n to a generating set of a recurrence to factor out linearly\n dependent solutions and return a basis:\n\n >>> from sympy import Symbol, casoratian, factorial\n >>> n = Symbol('n', integer=True)\n\n Exponential and factorial are linearly independent:\n\n >>> casoratian([2**n, factorial(n)], n) != 0\n True\n\n \"\"\"\n\n seqs = list(map(sympify, seqs))\n\n if not zero:\n f = lambda i, j: seqs[j].subs(n, n + i)\n else:\n f = lambda i, j: seqs[j].subs(n, i)\n\n k = len(seqs)\n\n return Matrix(k, k, f).det()\n\n\ndef eye(*args, **kwargs):\n \"\"\"Create square identity matrix n x n\n\n See Also\n ========\n\n diag\n zeros\n ones\n \"\"\"\n\n return Matrix.eye(*args, **kwargs)\n\n\ndef diag(*values, **kwargs):\n \"\"\"Returns a matrix with the provided values placed on the\n diagonal. If non-square matrices are included, they will\n produce a block-diagonal matrix.\n\n Examples\n ========\n\n This version of diag is a thin wrapper to Matrix.diag that differs\n in that it treats all lists like matrices -- even when a single list\n is given. 
If this is not desired, either put a `*` before the list or\n set `unpack=True`.\n\n >>> from sympy import diag\n\n >>> diag([1, 2, 3], unpack=True) # = diag(1,2,3) or diag(*[1,2,3])\n Matrix([\n [1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]])\n\n >>> diag([1, 2, 3]) # a column vector\n Matrix([\n [1],\n [2],\n [3]])\n\n See Also\n ========\n .common.MatrixCommon.eye\n .common.MatrixCommon.diagonal - to extract a diagonal\n .common.MatrixCommon.diag\n .expressions.blockmatrix.BlockMatrix\n \"\"\"\n # Extract any setting so we don't duplicate keywords sent\n # as named parameters:\n kw = kwargs.copy()\n strict = kw.pop('strict', True) # lists will be converted to Matrices\n unpack = kw.pop('unpack', False)\n return Matrix.diag(*values, strict=strict, unpack=unpack, **kw)\n\n\ndef GramSchmidt(vlist, orthonormal=False):\n \"\"\"Apply the Gram-Schmidt process to a set of vectors.\n\n Parameters\n ==========\n\n vlist : List of Matrix\n Vectors to be orthogonalized for.\n\n orthonormal : Bool, optional\n If true, return an orthonormal basis.\n\n Returns\n =======\n\n vlist : List of Matrix\n Orthogonalized vectors\n\n Notes\n =====\n\n This routine is mostly duplicate from ``Matrix.orthogonalize``,\n except for some difference that this always raises error when\n linearly dependent vectors are found, and the keyword ``normalize``\n has been named as ``orthonormal`` in this function.\n\n See Also\n ========\n\n .matrices.MatrixSubspaces.orthogonalize\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process\n \"\"\"\n return MutableDenseMatrix.orthogonalize(\n *vlist, normalize=orthonormal, rankcheck=True\n )\n\n\ndef hessian(f, varlist, constraints=[]):\n \"\"\"Compute Hessian matrix for a function f wrt parameters in varlist\n which may be given as a sequence or a row/column vector. 
A list of\n constraints may optionally be given.\n\n Examples\n ========\n\n >>> from sympy import Function, hessian, pprint\n >>> from sympy.abc import x, y\n >>> f = Function('f')(x, y)\n >>> g1 = Function('g')(x, y)\n >>> g2 = x**2 + 3*y\n >>> pprint(hessian(f, (x, y), [g1, g2]))\n [ d d ]\n [ 0 0 --(g(x, y)) --(g(x, y)) ]\n [ dx dy ]\n [ ]\n [ 0 0 2*x 3 ]\n [ ]\n [ 2 2 ]\n [d d d ]\n [--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]\n [dx 2 dy dx ]\n [ dx ]\n [ ]\n [ 2 2 ]\n [d d d ]\n [--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]\n [dy dy dx 2 ]\n [ dy ]\n\n References\n ==========\n\n https://en.wikipedia.org/wiki/Hessian_matrix\n\n See Also\n ========\n\n sympy.matrices.mutable.Matrix.jacobian\n wronskian\n \"\"\"\n # f is the expression representing a function f, return regular matrix\n if isinstance(varlist, MatrixBase):\n if 1 not in varlist.shape:\n raise ShapeError(\"`varlist` must be a column or row vector.\")\n if varlist.cols == 1:\n varlist = varlist.T\n varlist = varlist.tolist()[0]\n if is_sequence(varlist):\n n = len(varlist)\n if not n:\n raise ShapeError(\"`len(varlist)` must not be zero.\")\n else:\n raise ValueError(\"Improper variable list in hessian function\")\n if not getattr(f, 'diff'):\n # check differentiability\n raise ValueError(\"Function `f` (%s) is not differentiable\" % f)\n m = len(constraints)\n N = m + n\n out = zeros(N)\n for k, g in enumerate(constraints):\n if not getattr(g, 'diff'):\n # check differentiability\n raise ValueError(\"Function `f` (%s) is not differentiable\" % f)\n for i in range(n):\n out[k, i + m] = g.diff(varlist[i])\n for i in range(n):\n for j in range(i, n):\n out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])\n for i in range(N):\n for j in range(i + 1, N):\n out[j, i] = out[i, j]\n return out\n\n\ndef jordan_cell(eigenval, n):\n \"\"\"\n Create a Jordan block:\n\n Examples\n ========\n\n >>> from sympy.matrices import jordan_cell\n >>> from sympy.abc import x\n >>> jordan_cell(x, 4)\n Matrix([\n [x, 1, 0, 0],\n [0, x, 1, 0],\n [0, 0, x, 1],\n [0, 0, 0, x]])\n \"\"\"\n\n return Matrix.jordan_block(size=n, eigenvalue=eigenval)\n\n\ndef matrix_multiply_elementwise(A, B):\n \"\"\"Return the Hadamard product (elementwise product) of A and B\n\n >>> from sympy.matrices import matrix_multiply_elementwise\n >>> from sympy.matrices import Matrix\n >>> A = Matrix([[0, 1, 2], [3, 4, 5]])\n >>> B = Matrix([[1, 10, 100], [100, 10, 1]])\n >>> matrix_multiply_elementwise(A, B)\n Matrix([\n [ 0, 10, 200],\n [300, 40, 5]])\n\n See Also\n ========\n\n __mul__\n \"\"\"\n return A.multiply_elementwise(B)\n\n\ndef ones(*args, **kwargs):\n \"\"\"Returns a matrix of ones with ``rows`` rows and ``cols`` columns;\n if ``cols`` is omitted a square matrix will be returned.\n\n See Also\n ========\n\n zeros\n eye\n diag\n \"\"\"\n\n if 'c' in kwargs:\n kwargs['cols'] = kwargs.pop('c')\n\n return Matrix.ones(*args, **kwargs)\n\n\ndef randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False,\n percent=100, prng=None):\n \"\"\"Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted\n the matrix will be square. If ``symmetric`` is True the matrix must be\n square. 
If ``percent`` is less than 100 then only approximately the given\n percentage of elements will be non-zero.\n\n The pseudo-random number generator used to generate matrix is chosen in the\n following way.\n\n * If ``prng`` is supplied, it will be used as random number generator.\n It should be an instance of :class:`random.Random`, or at least have\n ``randint`` and ``shuffle`` methods with same signatures.\n * if ``prng`` is not supplied but ``seed`` is supplied, then new\n :class:`random.Random` with given ``seed`` will be created;\n * otherwise, a new :class:`random.Random` with default seed will be used.\n\n Examples\n ========\n\n >>> from sympy.matrices import randMatrix\n >>> randMatrix(3) # doctest:+SKIP\n [25, 45, 27]\n [44, 54, 9]\n [23, 96, 46]\n >>> randMatrix(3, 2) # doctest:+SKIP\n [87, 29]\n [23, 37]\n [90, 26]\n >>> randMatrix(3, 3, 0, 2) # doctest:+SKIP\n [0, 2, 0]\n [2, 0, 1]\n [0, 0, 1]\n >>> randMatrix(3, symmetric=True) # doctest:+SKIP\n [85, 26, 29]\n [26, 71, 43]\n [29, 43, 57]\n >>> A = randMatrix(3, seed=1)\n >>> B = randMatrix(3, seed=2)\n >>> A == B # doctest:+SKIP\n False\n >>> A == randMatrix(3, seed=1)\n True\n >>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP\n [77, 70, 0],\n [70, 0, 0],\n [ 0, 0, 88]\n \"\"\"\n if c is None:\n c = r\n # Note that ``Random()`` is equivalent to ``Random(None)``\n prng = prng or random.Random(seed)\n\n if not symmetric:\n m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))\n if percent == 100:\n return m\n z = int(r*c*(100 - percent) // 100)\n m._mat[:z] = [S.Zero]*z\n prng.shuffle(m._mat)\n\n return m\n\n # Symmetric case\n if r != c:\n raise ValueError('For symmetric matrices, r must equal c, but %i != %i' % (r, c))\n m = zeros(r)\n ij = [(i, j) for i in range(r) for j in range(i, r)]\n if percent != 100:\n ij = prng.sample(ij, int(len(ij)*percent // 100))\n\n for i, j in ij:\n value = prng.randint(min, max)\n m[i, j] = m[j, i] = value\n return m\n\n\ndef wronskian(functions, var, method='bareiss'):\n \"\"\"\n Compute Wronskian for [] of functions\n\n ::\n\n | f1 f2 ... fn |\n | f1' f2' ... fn' |\n | . . . . |\n W(f1, ..., fn) = | . . . . |\n | . . . . |\n | (n) (n) (n) |\n | D (f1) D (f2) ... D (fn) |\n\n see: https://en.wikipedia.org/wiki/Wronskian\n\n See Also\n ========\n\n sympy.matrices.mutable.Matrix.jacobian\n hessian\n \"\"\"\n\n for index in range(0, len(functions)):\n functions[index] = sympify(functions[index])\n n = len(functions)\n if n == 0:\n return 1\n W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))\n return W.det(method)\n\n\ndef zeros(*args, **kwargs):\n \"\"\"Returns a matrix of zeros with ``rows`` rows and ``cols`` columns;\n if ``cols`` is omitted a square matrix will be returned.\n\n See Also\n ========\n\n ones\n eye\n diag\n \"\"\"\n\n if 'c' in kwargs:\n kwargs['cols'] = kwargs.pop('c')\n\n return Matrix.zeros(*args, **kwargs)\n" ]
[ [ "numpy.ndindex", "numpy.empty" ] ]
xujli/cbof_torch
[ "ed8d67dd7a41b6345305d970d0f8fa0892f8ccee" ]
[ "bof_torch.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.pairwise import pairwise_distances\nimport numpy as np\n\nclass BoF_Pooling(nn.Module):\n def __init__(self, n_codewords, features, spatial_level=0, **kwargs):\n super(BoF_Pooling, self).__init__()\n \"\"\"\n Initializes a BoF Pooling layer\n :param n_codewords: the number of the codewords to be used\n :param spatial_level: 0 -> no spatial pooling, 1 -> spatial pooling at level 1 (4 regions). Note that the\n codebook is shared between the different spatial regions\n :param kwargs:\n \"\"\"\n\n self.N_k = n_codewords\n self.spatial_level = spatial_level\n self.V, self.sigmas = None, None\n self.relu = nn.ReLU()\n self.init(features)\n\n self.softmax = nn.Softmax(dim=1)\n\n def init(self, features):\n self.V = nn.Parameter(nn.init.uniform_(torch.empty((self.N_k, features, 1, 1), requires_grad=True)))\n # self.V.shape = (output channels, input channels, kernel width, kernel height)\n self.sigmas = nn.Parameter(nn.init.constant_(torch.empty((1, self.N_k, 1, 1), requires_grad=True), 0.1))\n\n def forward(self, input):\n # Calculate the pairwise distances between the codewords and the feature vectors\n x_square = torch.sum(input=input, dim=1, keepdim=True)\n y_square = torch.sum(self.V ** 2, dim=1, keepdim=True).permute([3, 0, 1, 2]) # permute axis to\n\n dists = x_square + y_square - 2 * F.conv2d(input, self.V)\n #dists = torch.maximum(dists, torch.zeros(size=dists.shape))\n dists = self.relu(dists) # replace maximum to keep grads\n\n quantized_features = self.softmax(- dists / (self.sigmas ** 2))\n\n # Compile the histogram\n if self.spatial_level == 0:\n histogram = torch.mean(quantized_features, dim=[2, 3])\n elif self.spatial_level == 1:\n shape = quantized_features.shape\n mid_1 = shape[2] / 2\n mid_1 = int(mid_1)\n mid_2 = shape[3] / 2\n mid_2 = int(mid_2)\n histogram1 = torch.mean(quantized_features[:, :, :mid_1, :mid_2], [2, 3])\n histogram2 = torch.mean(quantized_features[:, :, mid_1:, :mid_2], [2, 3])\n histogram3 = torch.mean(quantized_features[:, :, :mid_1, mid_2:], [2, 3])\n histogram4 = torch.mean(quantized_features[:, :, mid_1:, mid_2:], [2, 3])\n histogram = torch.stack([histogram1, histogram2, histogram3, histogram4], 1)\n histogram = torch.reshape(histogram, (-1, 4 * self.N_k))\n else:\n # No other spatial level is currently supported (it is trivial to extend the code)\n assert False\n\n # Simple trick to avoid rescaling issues\n return histogram * self.N_k\n\n def compute_output_shape(self, input_shape): # 当spatial_level=0时,输出的特征数=n_codewords,为1时输出的特征数为n_codewords * 4\n if self.spatial_level == 0:\n return (input_shape[0], self.N_k)\n elif self.spatial_level == 1:\n return (input_shape[0], 4 * self.N_k)\n\n\ndef initialize_bof_layers(model, data_loader, n_samples=100, n_feature_samples=5000, batch_size=32, k_means_max_iters=300,\n k_means_n_init=4):\n \"\"\"\n Initializes the BoF layers of a model\n :param model: the model\n :param data: data to be used for initializing the model\n :param n_samples: number of data samples used for the initializes\n :param n_feature_samples: number of feature vectors to be used for the clustering process\n :param batch_size:\n :param k_means_max_iters: the maximum number of iterations for the clustering algorithm (k-means)\n :param k_means_n_init: defines how many times to run the k-means algorithm\n :return:\n \"\"\"\n features = {}\n def get_features(name):\n def hook(module, input):\n if len(input) == 1:\n data = 
input[0].cpu().detach().permute([0, 2, 3, 1]).numpy()\n features[name].append(data.reshape(-1, data.shape[-1]))\n\n return hook\n\n iternum = int(n_samples / batch_size + 0.5)\n for name, layer in model.named_modules():\n if isinstance(layer, BoF_Pooling):\n print(\"Found BoF layer (layer %s), initializing...\" % name)\n\n # Compile a function for getting the feature vectors\n # get_features = K.function([model.input] + [model.training], [model.layers[i - 1].output])\n features[name] = []\n handler = layer.register_forward_pre_hook(get_features(name))\n\n # iterate dataset to trigger hook to get features\n for i in range(iternum):\n data, labels = data_loader.__iter__().next()\n\n if len(list(data.shape)) == 5:\n data = data[:, 0]\n if torch.cuda.is_available():\n data = data.cuda()\n output = model(data)\n\n handler.remove()\n\n layer_features = np.concatenate(features[name])\n np.random.shuffle(layer_features)\n layer_features = layer_features[:n_feature_samples]\n\n # Cluster the features\n kmeans = KMeans(n_clusters=layer.N_k, n_init=k_means_n_init, max_iter=k_means_max_iters)\n kmeans.fit(layer_features)\n # V of BoF pooling layer\n V = kmeans.cluster_centers_\n V = V.reshape((V.shape[0], V.shape[1], 1, 1))\n\n # Set the value for the codebook\n layer.V.data = torch.tensor(np.float32(V)).cuda() if torch.cuda.is_available() else \\\n torch.tensor(np.float32(V))\n # Get the mean distance for initializing the sigmas\n mean_dist = np.mean(pairwise_distances(layer_features[:100]))\n\n # Set the value for sigmas\n sigmas = np.ones((1, layer.N_k, 1, 1)) * (mean_dist ** 2)\n layer.sigmas.data = torch.tensor(np.float32(sigmas)).cuda() if torch.cuda.is_available() else \\\n torch.tensor(np.float32(sigmas))\n\n\nif __name__ == '__main__':\n x = torch.ones(size=(32, 32, 11, 11)) * 0.5\n model = BoF_Pooling(64, features=32, spatial_level=1)\n y = model(x)\n print(y.mean())\n" ]
[ [ "numpy.concatenate", "torch.reshape", "torch.stack", "torch.nn.Softmax", "sklearn.cluster.KMeans", "numpy.ones", "numpy.random.shuffle", "torch.ones", "torch.nn.ReLU", "numpy.float32", "torch.cuda.is_available", "sklearn.metrics.pairwise.pairwise_distances", "torch.empty", "torch.nn.functional.conv2d", "torch.mean", "torch.sum" ] ]
LiamLYJ/scene_seg
[ "8e0f55f6890ab7b18076650a80a86ccca249940a" ]
[ "test.py" ]
[ "import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.models as models\nimport torch.nn.functional as nn_F\nfrom nets import encoder_net, decoder_net\nimport numpy as np\nimport argparse\nfrom data_loader import texture_seg_dataset, get_data_direct\nfrom utils import seg_loss, load_model, remap2normal, normal_masks\nimport os\nimport cv2\nimport time\n\ndef main(args):\n batch_size = args.batch_size\n model_dir = args.model_dir\n save_dir = args.save_dir\n filt_stride = args.filt_stride\n filt_size = args.filt_size\n\n if not args.mode is None:\n device = torch.device(args.mode)\n else:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # device = torch.device('cpu')\n\n data_set = texture_seg_dataset(args.image_dir,\n img_size = args.img_size,\n segmentation_regions = args.segmentation_regions,\n texture_size = args.texture_size,\n use_same_from = args.use_same_from)\n imgs, textures, masks = data_set.feed(batch_size)\n\n model_encoder = encoder_net().to(device)\n model_decoder = decoder_net().to(device)\n filt_adp = nn.AdaptiveAvgPool2d((filt_size,filt_size))\n\n model_encoder, model_decoder, iter_old = load_model(model_dir, model_encoder, model_decoder)\n print ('load model from %d iter'%(iter_old))\n\n imgs = torch.from_numpy(imgs)\n textures = torch.from_numpy(textures)\n masks = torch.from_numpy(masks)\n\n imgs = imgs.type(torch.FloatTensor).to(device)\n textures = textures.type(torch.FloatTensor).to(device)\n masks = masks.type(torch.FloatTensor).to(device)\n\n encoder_img, vgg_features = model_encoder(imgs)\n encoder_texture, _ = model_encoder(textures)\n\n filt = filt_adp(encoder_texture).to(device)\n\n correlations = []\n for index in range(batch_size):\n t0 = encoder_img[index].cuda()\n t1 = filt[index].cuda()\n padding = (filt_stride - 1) * t0.shape[-1] - filt_stride + filt.shape[-1]\n padding = int(padding / 2)\n correlations.append(nn_F.conv2d(t0.unsqueeze(0), t1.unsqueeze(0), stride = filt_stride, padding = padding))\n correlations = torch.cat(correlations, 0)\n output_masks, _ = model_decoder(correlations, vgg_features)\n print ('output_masks: ', output_masks.shape)\n print ('img shape: ', imgs.shape)\n print ('masks shape:', masks.shape)\n\n imgs = remap2normal(imgs.cpu())\n textures = remap2normal(textures.cpu())\n output_masks = normal_masks(output_masks.cpu())\n\n for index in range(batch_size):\n torchvision.utils.save_image(imgs[index], os.path.join(save_dir, 'input_img_%02d.png'%(index)))\n torchvision.utils.save_image(masks[index], os.path.join(save_dir, 'gt_%02d.png'%(index)))\n torchvision.utils.save_image(output_masks[index], os.path.join(save_dir, 'output_%02d.png'%(index)))\n torchvision.utils.save_image(textures[index], os.path.join(save_dir, 'texture_%02d.png'%(index)))\n\n\ndef load_direct(args):\n model_dir = args.model_dir\n save_dir = args.save_dir\n filt_stride = args.filt_stride\n filt_size = args.filt_size\n batch_size = args.batch_size\n\n if not args.mode is None:\n device = torch.device(args.mode)\n else:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # device = torch.device('cpu')\n\n model_encoder = encoder_net().to(device)\n model_decoder = decoder_net().to(device)\n filt_adp = nn.AdaptiveAvgPool2d((filt_size,filt_size))\n\n model_encoder, model_decoder, iter_old = load_model(model_dir, model_encoder, model_decoder)\n print ('load model from %d iter'%(iter_old))\n\n all_imgs, all_textures = get_data_direct(img_size = args.img_size, imgs_dir = args.imgs_dir,\n texture_size 
= args.texture_size, textures_dir = args.textures_dir)\n all_batch_size = all_imgs.shape[0]\n print ('all batch_size is:', all_batch_size)\n iter_num = all_batch_size // batch_size\n\n # only allow to use remain is 0!!!\n assert (all_batch_size % batch_size == 0)\n\n for iter in range(iter_num):\n imgs = all_imgs[iter*batch_size:(iter+1)*batch_size, ...]\n textures = all_textures[iter*batch_size:(iter+1)*batch_size, ...]\n\n imgs = torch.from_numpy(imgs)\n textures = torch.from_numpy(textures)\n\n imgs = imgs.type(torch.FloatTensor).to(device)\n textures = textures.type(torch.FloatTensor).to(device)\n\n start = time.time()\n\n encoder_img, vgg_features = model_encoder(imgs)\n encoder_texture, _ = model_encoder(textures)\n\n filt = filt_adp(encoder_texture).to(device)\n\n correlations = []\n\n for index in range(batch_size):\n t0 = encoder_img[index].cuda()\n t1 = filt[index].cuda()\n padding = (filt_stride - 1) * t0.shape[-1] - filt_stride + filt.shape[-1]\n padding = int(padding / 2)\n correlations.append(nn_F.conv2d(t0.unsqueeze(0), t1.unsqueeze(0), stride = filt_stride, padding = padding))\n correlations = torch.cat(correlations, 0).to(device)\n output_masks, _ = model_decoder(correlations, vgg_features)\n\n end = time.time()\n print ('run for one frame (sec):', (end - start))\n\n print ('output_masks: ', output_masks.shape)\n print ('img shape: ', imgs.shape)\n\n imgs = remap2normal(imgs.cpu())\n textures = remap2normal(textures.cpu())\n output_masks = normal_masks(output_masks.cpu())\n\n for index in range(batch_size):\n torchvision.utils.save_image(imgs[index], os.path.join(save_dir, 'input_img_%d_%02d.png'%(iter, index)))\n torchvision.utils.save_image(output_masks[index], os.path.join(save_dir, 'output_%d_%02d.png'%(iter, index)))\n torchvision.utils.save_image(textures[index], os.path.join(save_dir, 'texture_%d_%02d.png'%(iter, index)))\n\n\nif __name__ == '__main__':\n # path\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_dir', type=str, default='./models/scene_model' , help='path for saving trained models')\n parser.add_argument('--image_dir', type=str, default='./dataset/dtd/images', help='directory for images from')\n parser.add_argument('--mode', type=str, default=None, help = 'mode to use ')\n parser.add_argument('--use_same_from', type=bool, default=True, help = 'if use the same texture from that same')\n # parser.add_argument('--save_dir', type=str, default='./save_dtd', help='directory for saving ')\n # parser.add_argument('--save_dir', type=str, default='./save_scene', help='directory for saving ')\n parser.add_argument('--save_dir', type=str, default='./real_test', help='directory for saving ')\n\n parser.add_argument('--filt_stride', type=int , default=1, help='convolution stride of textural filt')\n parser.add_argument('--filt_size', type=int , default=5, help='convolution filt size of textural filt')\n\n # Model parameters\n parser.add_argument('--img_size', type=int , default=256, help='input image size')\n parser.add_argument('--segmentation_regions', type=int , default=3, help='number of segmentation_regions')\n parser.add_argument('--texture_size', type=int , default=64, help='texture input size')\n\n parser.add_argument('--batch_size', type=int, default=1)\n\n parser.add_argument('--imgs_dir', type=str, default='./imgs_dir', help='directory for images from')\n parser.add_argument('--textures_dir', type=str, default='./textures_dir', help='directory for textures from')\n\n args = parser.parse_args()\n\n if not os.path.exists(args.save_dir):\n 
os.makedirs(args.save_dir)\n # main(args)\n load_direct(args)\n" ]
[ [ "torch.device", "torch.cat", "torch.from_numpy", "torch.cuda.is_available", "torch.nn.AdaptiveAvgPool2d" ] ]
helloTC/Rest_activation_prediction
[ "f67cfe221d9f63afd67a2a5ef6330b8519ca7641" ]
[ "trt_reliability_region.py" ]
[ "# Calculate test-retest reliability in these 40 subjects across global brain\n\nfrom os.path import join as pjoin\nfrom ATT.algorithm import tools\nimport framework_rt as fr\nimport cifti\nimport numpy as np\n\nparpath = '/nfs/s2/userhome/huangtaicheng/hworkingshop/hcp_test'\n\nwith open(pjoin(parpath, 'tables', 'sessid_trt'), 'r') as f:\n sessid_trt = f.read().splitlines()\n\n# mask, header = cifti.read(pjoin(parpath, 'rest_comp', 'mmp_subcortex_mask.dscalar.nii'))\nmask, header = cifti.read(pjoin(parpath, 'rest_comp', 'LGL_100Parcels_7Network_subregion.dscalar.nii'))\nmasklabel = np.unique(mask[mask!=0])\n\nactmap_path = [pjoin(parpath, 'task_merge_cohend', 'cohend_47contrast_zscore', sid+'_cohend_zscore.dtseries.nii') for sid in sessid_trt]\nactmap_trt_path = [pjoin(parpath, 'task_merge_cohend', 'cohend_47contrast_zscore_trt', sid+'_cohend_zscore.dscalar.nii') for sid in sessid_trt]\n\nactmap = fr.cifti_read(actmap_path, np.arange(47), 'all')\nactmap_trt = fr.cifti_read(actmap_trt_path, np.arange(47), 'all')\n\ntrt_map = np.zeros((47,1,91282))\nfor task in range(47):\n reliability, discrimination = fr.pred_partsim(actmap[:,task,:], actmap_trt[:,task,:], mask[0,:]) \n mean_reliability = np.mean(reliability,axis=1)\n for i, masklbl in enumerate(masklabel):\n trt_map[task, (mask==masklbl)] = mean_reliability[i]\n\n\n\n\n\n\n\n" ]
[ [ "numpy.mean", "numpy.arange", "numpy.zeros", "numpy.unique" ] ]
l337x911/inphadel
[ "7d5ad58d90d745ed82226a8b0b983875cbe1d26e" ]
[ "src/svphase/scripts/merge_coords.py" ]
[ "\"Merges bed files based on chr, start, end\" \n\nimport pandas as pd\nimport sys\n\ndef ucsc_fmt(s):\n\treturn \"{0}:{1}-{2}\".format(s.ix[0],s.ix[1],s.ix[2])\n\ndef merge(beda,chain_ab,bedb):\n\t\n\tbeda_df = pd.read_csv(beda, sep='\\t', index_col=None, header=None, names=['chrA','startA','endA','mid'])\n\tchain_df = pd.read_csv(chain_ab, sep='\\t', index_col=None, header=None, names=['chrB','startB','endB','mid'])\n\tbedb_df = pd.read_csv(bedb, sep='\\t', index_col=None, header=None, names=['chrB','startB','endB','label'])\n\t\n\tdf = pd.merge(chain_df, bedb_df, on=['chrB','startB','endB'], how='outer')\n\tdf = pd.merge(beda_df, df, on=['mid'], how='inner')\t\n\t\n\tdf = df.apply(lambda s:pd.Series([ucsc_fmt(s[['chrA','startA','endA']]),ucsc_fmt(s[['chrB','startB','endB']]),s['label'],s['label']], index=['posA','posB','label','edit']), axis=1)\n\t\n\tdf.to_csv(sys.stdout, sep='\\t',index=False, header=True) \n\nif __name__=='__main__':\n\timport argparse\n\t\n\tparser = argparse.ArgumentParser(description=__doc__)\n\tparser.add_argument('hg18_bed', help='Bed')\n\tparser.add_argument('coord_to_mid_chain', help='Coord Bed')\n\tparser.add_argument('hg19_bed_anno', help='Annotated bed')\n\t\n\targs = parser.parse_args()\n\tmerge(args.hg18_bed, args.coord_to_mid_chain, args.hg19_bed_anno)\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
AndreFCruz/scikit-multiflow
[ "c4dbbb70d4ed839d95a18ca799f073ac9ff9ba49" ]
[ "tests/data/test_mixed_generator.py" ]
[ "import os\nimport numpy as np\nfrom skmultiflow.data.mixed_generator import MIXEDGenerator\n\n\ndef test_mixed_generator(test_path):\n stream = MIXEDGenerator(classification_function=1, random_state=112, balance_classes=False)\n stream.prepare_for_use()\n\n assert stream.n_remaining_samples() == -1\n\n expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3']\n\n assert stream.feature_names == expected_names\n\n expected_targets = [0, 1]\n assert stream.target_values == expected_targets\n\n assert stream.target_names == ['target_0']\n\n assert stream.n_features == 4\n\n assert stream.n_cat_features == 2\n\n assert stream.n_num_features == 2\n\n assert stream.n_targets == 1\n\n assert stream.get_data_info() == 'Mixed Generator - 1 targets, 2 classes, 4 features'\n\n assert stream.has_more_samples() is True\n\n assert stream.is_restartable() is True\n\n\n # Load test data corresponding to first 10 instances\n test_file = os.path.join(test_path, 'mixed_stream.npz')\n data = np.load(test_file)\n X_expected = data['X']\n y_expected = data['y']\n\n X, y = stream.next_sample()\n assert np.alltrue(X[0] == X_expected[0])\n assert np.alltrue(y[0] == y_expected[0])\n\n X, y = stream.last_sample()\n assert np.alltrue(X[0] == X_expected[0])\n assert np.alltrue(y[0] == y_expected[0])\n\n stream.restart()\n X, y = stream.next_sample(10)\n assert np.alltrue(X == X_expected)\n assert np.alltrue(y == y_expected)\n\n assert stream.n_targets == np.array(y).ndim\n\n assert stream.n_features == X.shape[1]\n" ]
[ [ "numpy.alltrue", "numpy.array", "numpy.load" ] ]
luketaverne/handcam
[ "e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30" ]
[ "handcam/train_WRN_Keras.py" ]
[ "from datetime import datetime\n\nfrom tensorflow.python.keras.callbacks import Callback\n\nfrom handcam.ltt.datasets.handcam.HandCamDataHandler import HandGenerator\nfrom handcam.ltt.datasets.uw_rgbd.UWDataHandler import (\n TrainGenerator,\n ValidationGenerator,\n)\nfrom handcam.ltt.network.Tools import write_log\nfrom handcam.ltt.util.Preprocessing import DataAugmentation\n\n\nimport cv2\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow\n\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import ModelCheckpoint, ProgbarLogger\nfrom tensorflow.python.keras.callbacks import TensorBoard\n\n\nfrom handcam.ltt.datasets.uw_rgbd import UWDataHandler\nfrom handcam.ltt.datasets.handcam import HandCamDataHandler\n\nuw_handler = UWDataHandler.Handler()\ndata_handler_handcam = HandCamDataHandler.Handler()\n\"\"\"\nConfiguration\n\"\"\"\nlog_dir = \"/local/home/luke/programming/master-thesis/python/logs/WRNKeras/\"\nconfig = {}\n\nconfig[\"loss\"] = \"categorical_crossentropy\"\nconfig[\"num_epochs\"] = 100000\nconfig[\"batch_size\"] = 32\nconfig[\"train_names\"] = [\"train_loss\", \"train_accuracy\"]\nconfig[\"val_names\"] = [\"val_loss\", \"val_accuracy\"]\nconfig[\"log-dir\"] = log_dir + datetime.now().strftime(\"%Y-%m-%d-%H:%M\")\n\n\n\"\"\"\nBegin setting up DenseNet\n\"\"\"\nfrom handcam.ltt.network.model.Wide_ResNet import wrn_keras_luke\n\n# im = cv2.resize(cv2.imread('/local/home/luke/programming/master-thesis/python/ltt/network/model/DenseNet/resources/cat.jpg'), (224, 224)).astype(np.float32)\n# im = cv2.resize(cv2.imread('/local/home/luke/programming/master-thesis/python/ltt/network/model/DenseNet/resources/shark.jpg'), (224, 224)).astype(np.float32)\n\n# Subtract mean pixel and multiple by scaling constant\n# Reference: https://github.com/shicai/DenseNet-Caffe\n# im[:,:,0] = (im[:,:,0] - 103.94) * 0.017\n# im[:,:,1] = (im[:,:,1] - 116.78) * 0.017\n# im[:,:,2] = (im[:,:,2] - 123.68) * 0.017\n\n# Use pre-trained weights from ImageNet\nweights_path = \"/local/home/luke/programming/master-thesis/python/ltt/network/model/Wide_ResNet/weights/WRNKeras-test-weights.hdf5\"\n\n# add a temp dimension for depth\n# print(im.shape)\n# im = np.concatenate((im,np.zeros((224,224,1))),axis=2)\n# print(im.shape)\n\n# Insert a new dimension for the batch_size\n# im = np.expand_dims(im, axis=0)\n# print(im.shape)\n\n# Test pretrained model\n\n# After the next line, `model` will have the pre-trained weights loaded\nmodel = wrn_keras_luke.define_keras_model((224, 224, 4), classes=uw_handler.num_classes)\n\n# sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)\nadam = Adam()\nmodel.compile(optimizer=adam, loss=config[\"loss\"], metrics=[\"accuracy\"])\n# model.load_weights(weights_path)\n\ncheckpointer = ModelCheckpoint(\n filepath=\"/tmp/WRNKeras-test-weights.hdf5\",\n verbose=1,\n save_best_only=True,\n save_weights_only=True,\n)\ncheckpointer.set_model(model)\nprogbar = ProgbarLogger(count_mode=\"steps\")\nprogbar.set_model(model)\ntb = TensorBoard(config[\"log-dir\"], batch_size=config[\"batch_size\"])\ntb.set_model(model)\n\n\nclass printbatch(Callback):\n def on_batch_end(self, epoch, logs={}):\n print(logs)\n\n\npb = printbatch()\n\n\"\"\"\nBegin training loop\n\"\"\"\nbatch_no = 0\nepoch = 1\n\n# Setup the data_augmentation\n# TODO: Make this less of a mess???\nhand_generator = HandGenerator(\n f=data_handler_handcam.f, batch_size=config[\"batch_size\"]\n)\n\ntrain_generator = TrainGenerator(\n f=uw_handler.f,\n 
train_indicies=uw_handler.train_indicies,\n num_classes=uw_handler.num_classes,\n batch_size=config[\"batch_size\"],\n)\n\ndata_augmentation = DataAugmentation(\n image_generator=train_generator,\n hand_generator=hand_generator,\n rotations=True,\n center_crop=(224, 224),\n batch_size=config[\"batch_size\"],\n)\n\nvalidation_generator = ValidationGenerator(\n f=uw_handler.f,\n validation_indicies=uw_handler.validation_indicies,\n num_classes=uw_handler.num_classes,\n batch_size=config[\"batch_size\"],\n)\n\nvalidation_augmentation = DataAugmentation(\n image_generator=validation_generator,\n hand_generator=hand_generator,\n center_crop=(224, 224),\n rotations=True,\n batch_size=config[\"batch_size\"],\n)\n\n# print(next(validation_augmentation)[0].shape)\n\n# while True:\n# next(train_generator)\n#\n# steps_per_epoch = int(np.floor(data_handler.num_examples/config['batch_size']))\nfake_steps_per_epoch = 100\n\n\ncallback_list = [tb, checkpointer, progbar]\n\n# while True:\n# im_batch, _ = next(data_augmentation)\n# cv2.imshow('frame', im_batch[0,:,:,0:3])\n# cv2.waitKey(34)\n# print('im')\n\n\nmodel.fit_generator(\n data_augmentation,\n steps_per_epoch=fake_steps_per_epoch,\n epochs=config[\"num_epochs\"],\n verbose=1,\n callbacks=callback_list,\n validation_data=validation_augmentation,\n validation_steps=10,\n class_weight=uw_handler.object_percentage,\n workers=1,\n use_multiprocessing=False,\n max_queue_size=100,\n)\n" ]
[ [ "tensorflow.python.keras.callbacks.ProgbarLogger", "tensorflow.python.keras.callbacks.ModelCheckpoint", "tensorflow.python.keras.optimizers.Adam", "tensorflow.python.keras.callbacks.TensorBoard" ] ]