Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
0xbadc0ffe/FedSimulate
[ "1dfa6554c341ea46c02986fca1b61148ea8cd1f3" ]
[ "FedSimulate.py" ]
[ "from __future__ import print_function, division\nfrom cProfile import label\nfrom logging import raiseExceptions\n\nfrom typing import Mapping, Union, Optional, Callable, Dict\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport os\nfrom tqdm import tqdm, trange\nfrom torchsummary import summary\nimport utils_alg\n\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nimport platform\nimport models\nimport nn_utils\nimport copy\n\nfrom timeit import default_timer as timer\nfrom datetime import timedelta\n\nimport matplotlib.pyplot as plt\n\ndef clean():\n plat = platform.system()\n if plat == \"Windows\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\n\n\nclass FedDevice():\n\n def __init__(self, trainer:Union[nn_utils.Trainer, nn_utils.MFTrainer], state_dict, tag:str, pk:float, mask_weights:torch.tensor=None, nk=1):\n self.trainer = trainer\n self.state_dict=copy.deepcopy(state_dict)\n self.tag = tag # Device label\n self.pk = pk # probability to be picked for a training round\n self.nk = nk # we can assign here the Device weight (e.g number of examples/tot or some other weighing logic)\n self.mask_weights = mask_weights\n self.major_class = self.eval_major_class()\n\n def __str__(self):\n return f\"Device {self.tag} | Rounds Completed: {self.trainer.rounds_completed}\"\n\n def round_fit(self, model):\n acc, loss = self.trainer.round_fit_from_checkpoint(model, checkpoint=self.state_dict)\n self.state_dict = copy.deepcopy(model.state_dict())\n return acc, loss\n\n def load_state_dict(self, state_dict):\n self.state_dict = copy.deepcopy(state_dict)\n\n def set_mu(self, mu):\n self.trainer.mu=mu\n\n def free(self):\n self.state_dict=None\n\n def eval_major_class(self):\n if self.mask_weights is None or not (self.mask_weights-1).any():\n return None # all weights are 1\n else:\n return torch.argmax(self.mask_weights)\n\n\nclass FedServer():\n\n def __init__(self, model, trainer, tag:str=\"server\", weights_generator:Union[Callable,str]=None):\n self.model = model\n self.state_dict=model.state_dict()\n self.tag = tag\n self.trainer = trainer\n self.updates_cnt = 0\n if weights_generator is None or weights_generator == \"average\":\n self.gen_method = self.dicts_avg\n self.weights_generator = \"average\"\n elif weights_generator == \"first\":\n self.gen_method = self.dicts_first\n self.weights_generator = \"first\"\n elif weights_generator == \"top-k_avg\":\n self.gen_method = self.dicts_top_k_avg\n self.weights_generator = \"top-k_avg\"\n else:\n self.weights_generator = \"custom\"\n self.gen_method = weights_generator\n\n\n def __str__(self):\n return f\"Device {self.tag} | Rounds Completed: {self.updates}\"\n\n def round_fit(self, model):\n acc, loss = self.trainer.round_fit_from_checkpoint(model, checkpoint=self.state_dict)\n self.state_dict = copy.deepcopy(model.state_dict())\n return acc, loss\n\n def update(self, *args):\n result = self.gen_method(*args)\n self.updates_cnt += 1 \n if self.weights_generator == \"custom\":\n self.load_state_dict(result)\n \n # Takes the average of the dicts as the new server state dict.\n def dicts_avg(self, wk_list):\n if wk_list is None or len(wk_list) == 0:\n self.model.load_state_dict(self.state_dict)\n return None\n if len(wk_list) == 1:\n self.state_dict = copy.deepcopy(wk_list[0])\n self.model.load_state_dict(self.state_dict)\n return self.state_dict\n # cloning first element in state_dict\n self.state_dict = copy.deepcopy(wk_list[0])\n 
for key in wk_list[0]:\n tot = wk_list[0][key]\n for client_wk in wk_list[1:]:\n tot = tot + client_wk[key]\n self.state_dict[key] = tot/len(wk_list)\n # cloning result in model_dict\n self.model.load_state_dict(self.state_dict)\n return self.state_dict\n\n # Pick the first of the list as the new server state dict.\n # If the list is given already ordered by accuracy/loss or wethever this \n # will be like picking the most fitting trained instance.\n # ALERT: is not advisible to use this when heterogenious clients data is \n # involved or when the single clients trainings differ.\n # dicts_avg can be also used in this way by giving a singleton list with \n # the maximal state_dict\n def dicts_first(self, wk_list):\n if wk_list is None or len(wk_list) == 0:\n self.model.load_state_dict(self.state_dict)\n return None\n else:\n self.state_dict = copy.deepcopy(wk_list[0])\n self.model.load_state_dict(self.state_dict)\n return self.state_dict\n\n def dicts_top_k_avg(self, wk_dict, perform, K):\n if wk_dict is None or len(wk_dict) == 0:\n self.model.load_state_dict(self.state_dict)\n return None\n elif len(wk_dict) == 1:\n self.state_dict = copy.deepcopy(list(wk_dict.values())[0])\n self.model.load_state_dict(self.state_dict)\n return self.state_dict\n else:\n K = max(1,K)\n top_devs = {k: v for k, v in sorted(perform.items(), key=lambda item: item[1])[::-1]}\n top_devs = list(top_devs.keys())[:K]\n top_k_weights = [ wk_dict[tag] for tag in top_devs]\n self.state_dict = copy.deepcopy(top_k_weights[0])\n for key in top_k_weights[0]:\n tot = top_k_weights[0][key]\n for client_wk in top_k_weights[1:]:\n tot = tot + client_wk[key]\n self.state_dict[key] = tot/len(top_k_weights) # len(..) can differ by K if top_devs is shorter\n self.model.load_state_dict(self.state_dict)\n return self.state_dict\n\n\n def load_state_dict(self, state_dict):\n self.model.load_state_dict(state_dict)\n self.state_dict = copy.deepcopy(state_dict)\n\n def test(self):\n return self.trainer.test(self.model)\n\n def set_mu(self, mu):\n self.trainer.mu=mu\n\n\ndef update_weights(devices_list, server_weights):\n for dev in devices_list:\n dev.load_state_dict(server_weights)\n\ndef update_mu(devices_list, mu):\n for dev in devices_list:\n dev.set_mu(mu)\n\ndef free_all(devices_list):\n for dev in devices_list:\n dev.free()\n\n\nnn_utils.set_reproducibility()\nclean()\n\n\n\n##### Dataset\n\nn_channels = 3\ninput_size_w = 32\ninput_size_h = 32\ninput_size = input_size_w*input_size_h\n\n\n\n##### Model Hyper params\n\n# Multi Layer Perceptron\n# n_hidden = 9\n#model = models.MLP(input_size, n_channels, n_hidden, models.CIFAR10_output_size)\n\n# Convolutional Nerual Network\nn_features = 12\nmodel = models.CNN(input_size, n_channels, n_features, models.CIFAR10_output_size)\n\n\n\n##### Training Hyper params\n\ndevice = torch.device(\"cpu\") #torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntrain_dict = {\n \"device\": device,\n \"output_dim\": models.CIFAR10_output_size, # 10\n \"epochs\": 1,\n \"batch_size\" : 128,\n \"batch_size_val\" : 1000,\n \"data_transform\" : \"RGB\",\n \"opt_name\" : \"Adam\",\n \"lr\": 0.003,\n \"momentum\": 0.1,\n \"scheduler_bool\": True,\n \"gamma\": 0.9,\n #\"perm\": nn_utils.permute_pixels,\n \"mu\": 0.005 # if mu=0 => FedAvg\n}\n\nmodel.to(device)\ntest_trainer = False\nif test_trainer:\n #trainer = nn_utils.Trainer(model=model, train_dict=train_dict) # Model-based trainer\n trainer = nn_utils.MFTrainer(train_dict=train_dict) # Model-free trainer\n 
print(trainer.fit(model))\n\n\n#### FedAVG/Prox Hyper params\n\nemulated_devices = 200\nrounds = 15\ntrain_loner = True\npool = 20 # pool = emulated_devices => FedAvg\np_uniform = pool/emulated_devices # uniform probability to be choosed\nadaptive_mu = False\nadaptive_phase = 5\nmu_inc = 0.1\n\n# Synthetic Data Heterogeneity (alpha = beta = 0 homogeneous case)\n# Imbalance follows this power law : clip(exp(vals*-alpha*numb_of_classes)+beta, min=0)\nalpha = 0.09 # power factor\nbeta = 0 #0.2 # constant factor\n\ndevices_list = []\nw_generators = [\"average\", \"first\", \"top-k_avg\"]\nweights_generator = w_generators[0]\npick_top_k = pool # for top-k_avg\n\nfn_list = [\"uniform\", \"normal\"]\nsample_prob_fn = fn_list[0]\n\nif sample_prob_fn == \"uniform\":\n sample_prob = lambda : np.random.uniform(0,1)\n\n# Note: this is not so usefull in an unifrom device probability scenario\nelif sample_prob_fn == \"normal\":\n from scipy.stats import norm\n norm_mean = 0.5\n sigma = 0.3/emulated_devices\n sample_prob = lambda : norm.cdf(np.random.uniform(-4,4)) #np.random.normal(norm_mean, sigma)\n\n\n# Test Sampling\n#utils_alg.test_sampling(pool, emulated_devices, sample_prob)\n\n\n\n\n\n# Using a single data loaders pair for the homegenous case may improve the \n# performances but it could also not be ideal for some specfic models:\n# https://stackoverflow.com/questions/60311307/how-does-one-reset-the-dataloader-in-pytorch \ntrain_loader, test_loader, train_dataset, test_dataset = models.get_CIFARloaders(train_dict[\"batch_size\"],train_dict[\"batch_size_val\"],train_dict[\"data_transform\"], ret_datasets=True)\ndata_loaders = (train_loader, test_loader)\n\n\n# Server Device Initialization\ntrainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict) # this is needed only for the testing phase\nserver = FedServer(model, trainer, tag=\"server\", weights_generator=weights_generator)\n\n\nif train_loner:\n train_dict_loner = copy.deepcopy(train_dict)\n train_dict_loner[\"mu\"] = 0 \n # Loner Device used for comparison (weights do not update with server)\n trainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict_loner)\n loner = FedDevice(trainer=trainer, state_dict=server.state_dict, tag=\"loner\", pk=1)\n\n\n# Initializating devices to emulate\nfor i in range(emulated_devices):\n # resetting state_dict is not necessary since they are gonna train after a global model update by the server\n # nn_utils.reset_model_params(model) \n # initial_state_dict = model.state_dict()\n\n # Note: hard_mask=True slows the Decives Initialization but is more realistic (expecially when simulating few devices)\n train_loader, mask_weights = utils_alg.SIP(train_dataset, torch.arange(models.CIFAR10_output_size), train_dict[\"batch_size\"], alpha=alpha, beta=beta, hard_mask=True)\n data_loaders = (train_loader, data_loaders[1])\n\n trainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict)\n dev = FedDevice(trainer=trainer, state_dict=None, tag=str(i), pk=p_uniform, mask_weights=mask_weights)\n devices_list.append(dev)\n print(f\"Building Federation Clients (devices): {i}/{emulated_devices}\", end=\"\\r\")\n\n\n# Test initial accuracy\ntest_out, test_string = devices_list[0].trainer.test(model)\ninit_loss = test_out[\"loss_averager\"](None).detach().numpy()\nprint(\"\\n\\n\"+test_string)\n\n\n# Testing FedAvg\nseq_runs = 0 # counts the number of sequential model training (counting the loner device also) \nstart_time = timer() # timer to get the total elapsed 
time\nsampled = [] # store at each round the number of sampled devices (mean should be the pool value)\nserver_acc = [] # store at each round the server accuracy\nmean_client_acc = [] # store at each round the mean of clients' accuracy\nserver_loss = [] # store at each round the server loss\nbest_dev = [] # store at each round the client device with best accuracy\ntot_masks = torch.zeros(mask_weights.shape) # store the sum of the weights of the different masks\n\n# Initializing accuracy of the untrained model \nserver_acc.append(test_out[\"accuracy\"])\nmean_client_acc.append(test_out[\"accuracy\"]) \n\nif train_loner:\n loner_loss = [] # store at each round the loner loss\n loner_acc = [] # store at each round the loner device accuracy\n # Initializing accuracy of the untrained model \n loner_acc.append(test_out[\"accuracy\"])\n\nfor round in range(1,rounds+1):\n \n round_weights = {}\n round_sampled_devices = []\n\n # Sampling phase\n for dev in devices_list:\n if sample_prob() <= dev.pk:\n round_sampled_devices.append(dev)\n tot_masks += dev.mask_weights\n sampled_len = len(round_sampled_devices)\n sampled.append(sampled_len)\n update_weights(round_sampled_devices, server.state_dict) # more efficient, we update only this round working devices \n\n \n print(\"\\n##########################################\\n\")\n sampled_len = len(round_sampled_devices)\n print(f\"\\n\\n## Round {round}/{rounds} | Selected: {sampled_len}\\n\")\n\n # Training\n sum_acc = 0\n max_acc = 0\n bdev = None # best device tag\n client_perform = {}\n for i, dev in enumerate(round_sampled_devices):\n print(f\"Training Client {i+1}/{sampled_len}:\\n\")\n acc, _ = dev.round_fit(server.model)\n client_perform[dev.tag] = acc\n if acc > max_acc:\n max_acc = acc\n bdev = int(dev.tag)\n sum_acc += acc\n print(str(dev) + f\"/{round} | Accuracy: {acc} % | Major class: {dev.major_class} | Device hash: {nn_utils.state_hash(dev.state_dict)}\\n\")\n # print(f\"\\nDevice hash: {nn_utils.state_hash(dev.state_dict)}\\n\")\n print(\"-----------------------------\\n\")\n round_weights[dev.tag] = dev.state_dict\n seq_runs += 1\n\n if sampled_len != 0:\n mean_acc = sum_acc/sampled_len\n best_dev.append(bdev)\n else:\n if len(mean_client_acc)!=0:\n mean_acc = mean_client_acc[-1]\n else:\n mean_acc = sum_acc\n mean_client_acc.append(mean_acc)\n\n\n if train_loner:\n # Training the loner\n print(f\"Training Loner device:\\n\")\n acc, lon_loss = loner.round_fit(server.model)\n loner_acc.append(acc)\n loner_loss.append(lon_loss.numpy())\n print(str(loner) + f\"/{round} | Accuracy: {acc} % | Device hash: {nn_utils.state_hash(loner.state_dict)}\\n\")\n print(\"-----------------------------\\n\")\n seq_runs+=1\n\n # Updating server weights \n if weights_generator == \"average\":\n server.update(list(round_weights.values()))\n elif weights_generator == \"first\":\n if bdev is not None:\n server.update([round_weights[str(bdev)]])\n elif weights_generator == \"top-k_avg\":\n server.update(round_weights, client_perform, pick_top_k)\n else:\n raise Exception(\"Unknwown weights generation policy\")\n\n # Testing server\n test_out, test_string = server.test()\n server_acc.append(test_out[\"accuracy\"])\n round_server_loss = test_out[\"loss_averager\"](None).detach().numpy()\n server_loss.append(round_server_loss)\n print(f\"\\n\\n** Round {round}/{rounds} completed **\\n\")\n print(\"Sever \" + test_string+\"\\n\")\n print(f\"Server hash: {nn_utils.state_hash(server.state_dict)}\") # must be equal\n print(f\"Model hash: 
{nn_utils.state_hash(server.model.state_dict())}\\n\") # must be equal\n\n # Adaptive mu\n if adaptive_mu and round % adaptive_phase == 0:\n if init_loss - round_server_loss > 0:\n update_mu(devices_list, max(0,server.trainer.mu-mu_inc))\n server.set_mu(max(0,server.trainer.mu-mu_inc))\n else:\n update_mu(devices_list, max(0,server.trainer.mu+mu_inc))\n server.set_mu(max(0,server.trainer.mu-mu_inc))\n\n # Free (None overwrite) the selected devices state_dict to keep memory occupancy low\n # Since every device has its own copy of state_dict we would end with high memory allocated \n free_all(round_sampled_devices)\n\n\nend_time = timer()\nprint(f\"\\n\\n###########################################\\n\")\nprint(f\"\\nDevices: {emulated_devices} [ Alpha: {alpha} | Beta: {beta} ]\")\nprint(f\"\\nAvg pool per round: {sum(sampled)/rounds} [sample prob fn: {sample_prob_fn} | Expected pool: {pool}]\")\nprint(f\"\\nFinal Mu: {server.trainer.mu} | Weights generator: {server.weights_generator}\")\n\nif weights_generator == w_generators[2]:\n print(f\"Avg of top {pick_top_k} clients\")\nprint(f\"\\nRunned trainings: {seq_runs} [Rounds: {rounds}]\\n\")\nprint(f\"Sever | Rounds completed: {rounds} | Accuracy: {test_out['accuracy']} % | Device hash: {nn_utils.state_hash(server.state_dict)}\")\nprint(f\"Clients avg | Rounds completed: {rounds} | Accuracy: {np.round(mean_client_acc[-1],2)} % | Device hash: --- \")\nif train_loner:\n print(str(loner) + f\" | Accuracy: {acc} % | Device hash: {nn_utils.state_hash(loner.state_dict)}\\n\")\n\nprint(f\"Elapsed time: {timedelta(seconds=end_time-start_time)}\")\n\nprint(f\"\\n\\nTraining Dictionary: {train_dict}\")\n\n\n# Sampled devices per round \nplt.figure(1)\nplt.plot(range(1,rounds+1), sampled, label=\"sampled clients\")\nplt.plot(range(1,rounds+1), [pool]*len(sampled), label=\"expected avg\")\nplt.title(f\"Sampled clients [tot: {np.sum(sampled)}]\")\nplt.xlabel(\"round\")\nplt.ylabel(f\"#\")\nplt.legend()\n\n# Accuracy\nplt.figure(2)\nplt.plot(server_acc, color=\"red\", label=\"server\")\nplt.plot(mean_client_acc, color=\"blue\",linestyle=\"--\", label=\"clients-avg\")\nif train_loner:\n plt.plot(loner_acc, color=\"green\", label=\"loner\")\nplt.title(f\"Accuracy\")\nplt.xlabel(\"round\")\nplt.ylabel(f\"%\")\nplt.legend()\n\n# Test Loss (Server vs loner, clients Loss is not comparable)\nplt.figure(3)\nplt.plot(server_loss, color=\"red\", label=\"server\")\nif train_loner:\n plt.plot(loner_loss, color=\"green\", label=\"loner\")\nplt.title(f\"Test Loss\")\nplt.xlabel(\"round\")\nplt.ylabel(f\"Loss\")\nplt.legend()\n\n\n# Devices usage histogram\nplt.figure(4)\n# hist_data = []\n# for dev in devices_list:\n# hist_data = hist_data + [int(dev.tag)]*dev.trainer.rounds_completed\n# plt.hist(hist_data, emulated_devices)\nhist_data = {}\nfor dev in devices_list:\n hist_data[int(dev.tag)]=dev.trainer.rounds_completed\nplt.bar(hist_data.keys(), hist_data.values())\nplt.title(\"Devices usage\")\nplt.xlabel(\"Device Tag\")\nplt.ylabel(\"Usage\")\n\n\n# Best Devices\nplt.figure(5)\nplt.hist(best_dev, emulated_devices)\nplt.title(f\"Best Devices per round\")\nplt.xlabel(\"Device Tag\")\nplt.ylabel(f\"Round Winner counter\")\n\n\n# Distribution of Data\nplt.figure(6)\n#plt.plot(tot_masks.numpy()/np.sum(sampled))\nplt.bar(np.arange(len(tot_masks)), tot_masks.numpy()/np.sum(sampled))\nplt.title(f\"Average clients training data usage for each class\")\nplt.xlabel(\"Class\")\nplt.ylabel(f\"%\")\n\n\n\nmajor_classes = []\nfor dev in devices_list:\n if dev.major_class is not 
None:\n major_classes.append(dev.major_class)\nif len(major_classes)>1:\n plt.figure(7)\n plt.title(\"Major Classes Distribution\")\n plt.bar(np.arange(len(major_classes)), major_classes)\n plt.ylabel(\"Class\")\n plt.xlabel(\"Device Tag\")\n\n\n\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sum", "matplotlib.pyplot.title", "torch.zeros", "torch.argmax", "matplotlib.pyplot.plot", "numpy.random.uniform", "matplotlib.pyplot.ylabel", "numpy.round", "torch.arange", "torch.device", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.figure" ] ]
ygCoconut/volume2stl
[ "bd95fc39620afd21ce08c8c805ac213583d9daaa" ]
[ "1_benchmarking/spinecode/skel.py" ]
[ "import os\nimport collections\nfrom collections import namedtuple\n\nimport scipy.sparse as sp\nfrom scipy.sparse import csgraph\nfrom scipy.spatial import KDTree\nfrom scipy.ndimage.morphology import distance_transform_edt\nimport numpy as np\nimport h5py\nimport kimimaro\nimport cloudvolume as cv\n\nfrom . import u\n\n\nFakeSkeleton = namedtuple(\"FakeSkeleton\",\n [\"vertices\", \"edges\", \"radii\",\n \"vertex_types\", \"ind_map\"])\n\n\ndef skeletonize_around_pt(cvol, pt, segid, kimi_params,\n bbox_width=(125, 125, 100)):\n\n bbox = u.make_bbox([pt], bbox_width)\n\n seg = cvol[bbox[0][0]:bbox[1][0],\n bbox[0][1]:bbox[1][1],\n bbox[0][2]:bbox[1][2]]\n\n return skeletonize_seg(seg, segid, kimi_params)\n\n\ndef skeletonize_seg(seg, segid, kimi_params):\n return kimimaro.skeletonize(seg, object_ids=[segid], **kimi_params)[segid]\n\n\ndef label_skeleton(skel, count_thr=3, dist_thr=200, radius_buffer=50,\n path_inds=None, path_radii=None, root=None):\n \"\"\"\n Labels the spines of a skeleton using (mostly) the number of\n paths passing through each node. Performs other post-processing\n of a hard threshold to try to reach the base of each spine.\n\n Label 1 => dendritic spine\n Label 0 => dendritic shaft\n \"\"\"\n if (path_inds is None) or (path_radii is None):\n path_inds, path_radii = compute_path_info(skel, root=root)\n path_counts = node_path_counts(path_inds)\n\n num_nodes = len(skel.vertices)\n labels = np.ones((num_nodes,), dtype=np.uint8)\n # Labeling an initial set of shaft seeds\n labels[path_counts > count_thr] = 0\n\n dists, seed_inds = path_distance_transform(path_inds, labels)\n\n to_label = ((dists < dist_thr) &\n (skel.radii > skel.radii[seed_inds] - radius_buffer))\n prelim_labels = labels.copy()\n prelim_labels[to_label] = 0\n\n return clean_prelim_labels(path_inds, prelim_labels, labels)\n\n\ndef label_skeleton_pathcount(skel, count_thr=6, root=None):\n \"\"\"\n Labels the spines of a skeleton using (solely) the number\n of paths passing through each node.\n \"\"\"\n path_inds, path_radii = compute_path_info(skel, root=root)\n num_nodes = len(skel.vertices)\n counts = node_path_counts(path_inds, num_nodes)\n\n labels = np.ones((num_nodes,), dtype=np.uint8)\n labels[counts > count_thr] = 0\n # removing nodes from a root (these have been a spine in the past)\n # labels[counts == counts.max()] = 1\n\n return labels\n\n\ndef extract_spine_near_pts(skel, labels, pts, path_inds=None, root=None,\n cv_skel=True, return_all_paths=False, kdt=None):\n \"\"\"\n Extracts the stretches of spine-labeled skeleton closest to a given\n list of landmark points (often synapses).\n \"\"\"\n if path_inds is None:\n path_inds, _ = compute_path_info(skel, root=root, cv_skel=cv_skel)\n\n if kdt is None:\n kdt = KDTree(skel.vertices)\n\n spines = list()\n for pt in pts:\n closest_node = find_closest_node(skel, pt, kdtree=kdt)\n path = find_containing_paths(path_inds, closest_node, max_len=True)\n spine, root = extract_spine_from_path(path, labels, closest_node)\n all_prongs = find_containing_paths(path_inds, root)\n if len(all_prongs) > 1 and return_all_paths:\n # multi-pronged spine\n spine_prongs = [extract_spine_from_path(prong, labels, root)[0]\n for prong in all_prongs]\n spine = np.unique(np.hstack(spine_prongs))\n\n spines.append((spine, root))\n\n return spines\n\n\ndef extract_spine_by_node(skel, labels, nodes, path_inds=None, root=None,\n cv_skel=True, return_all_paths=False):\n if path_inds is None:\n path_inds, _ = compute_path_info(skel, root=root, cv_skel=cv_skel)\n\n spines = 
list()\n    for node in nodes:\n        path = find_containing_paths(path_inds, node, max_len=True)\n        spine, root = extract_spine_from_path(path, labels, node)\n        all_prongs = find_containing_paths(path_inds, root)\n        if len(all_prongs) > 1 and return_all_paths:\n            # multi-pronged spine\n            spine_prongs = [extract_spine_from_path(prong, labels, root)[0]\n                            for prong in all_prongs]\n            spine = np.unique(np.hstack(spine_prongs))\n\n        spines.append((spine, root))\n\n    return spines\n\n\ndef compute_path_info(skel, root=None, cv_skel=True):\n    path_inds = paths(skel, root=root, cv_skel=cv_skel)\n    path_radii = [skel.radii[inds] for inds in path_inds]\n\n    return path_inds, path_radii\n\n\ndef skel_by_inds(skel, inds):\n    assert len(inds) > 0, \"empty inds\"\n    new_verts = skel.vertices[inds]\n    new_vtypes = skel.vertex_types[inds]\n    new_radii = skel.radii[inds]\n\n    ind_map = np.empty((max(inds)+1,), dtype=inds.dtype)\n    ind_map[inds] = np.arange(len(inds))\n    edge_inds = np.all(np.isin(skel.edges, inds), axis=1)\n    new_edges = ind_map[skel.edges[edge_inds]]\n\n    return FakeSkeleton(new_verts, new_edges, new_radii, new_vtypes, ind_map)\n\n\ndef translate_skel_ids(skel, ids):\n    \"\"\"\n    Maps vertex ids to those of a view (FakeSkeleton)\n    \"\"\"\n    return skel.ind_map[ids]\n\n\ndef paths_containing_pair(skel, root, other, single=False,\n                          max_len=False, cv_skel=True):\n    path_inds = paths(skel, root=root, return_inds=True, cv_skel=cv_skel)\n    cpaths = [inds for inds in path_inds if\n              np.all(np.isin((root, other), inds))]\n\n    if single:\n        assert len(cpaths) == 1, \"both nodes contained in multiple paths\"\n        return cpaths[0]\n\n    if max_len:\n        return max(cpaths, key=len)\n\n    return cpaths\n\n\ndef find_furthest_pt(skel, root, single=True):\n    num_nodes = len(skel.vertices)\n    edges = skel.edges\n    g = sp.coo_matrix((np.ones(len(edges),), (edges[:, 0], edges[:, 1])),\n                      shape=(num_nodes, num_nodes))\n    o = csgraph.breadth_first_order(g, root, directed=False,\n                                    return_predecessors=False)\n\n    furthest_node = o[-1]\n\n    o2, preds = csgraph.breadth_first_order(g, furthest_node, directed=False,\n                                            return_predecessors=True)\n\n    path_inds = reconstruct_all_paths(preds)\n    paths = [inds for inds in path_inds if root in inds]\n\n    if single:\n        assert len(paths) == 1, \"Too many paths\"\n        return furthest_node, paths[0]\n\n    else:\n        return furthest_node, paths\n\n\ndef medium_radius_cut_pt(skel, path_inds, simplest_vers=True):\n    radii = skel.radii[path_inds]\n    half_len = len(path_inds) // 2\n    min_close_rad_i = np.argmin(radii[:half_len])\n    max_far_rad_i = np.argmax(radii[half_len:]) + half_len\n\n    if simplest_vers:\n        local_cut_i = (min_close_rad_i + max_far_rad_i) // 2\n        return path_inds[local_cut_i], local_cut_i\n\n    linear_dist = ((radii[min_close_rad_i:max_far_rad_i] -\n                    radii[min_close_rad_i]) /\n                   (radii[max_far_rad_i] - radii[min_close_rad_i]))\n\n    # shifting window to account for large bumps (e.g. 
multi-headed spines)\n min_i = np.max(np.nonzero(linear_dist < 1/5)[0], initial=0)\n \n try:\n local_cut_i = (np.nonzero(linear_dist[min_i:] > 1/3)[0].min() +\n min_close_rad_i + min_i)\n except Exception:\n local_cut_i = min_close_rad_i + min_i\n\n return path_inds[local_cut_i], local_cut_i\n\n\ndef reconstruct_all_paths(preds):\n leaves = np.nonzero(~np.isin(np.arange(len(preds)), preds))[0]\n return [reconstruct_path(preds, leaf) for leaf in leaves]\n\n\ndef reconstruct_path(preds, leaf):\n path = []\n curr = leaf\n while curr >= 0:\n path.append(curr)\n curr = preds[curr]\n return path\n\n\ndef node_path_counts(path_inds, num_nodes=None):\n if num_nodes is None:\n num_nodes = max(map(max, path_inds)) + 1\n\n counts = np.zeros((num_nodes,), dtype=np.uint16)\n\n for inds in path_inds:\n counts[inds] += 1\n\n return counts\n\n\ndef paths(skel, root=None, return_inds=True, cv_skel=True):\n\n if cv_skel:\n paths = []\n for tree in skel.components():\n paths += single_tree_paths(skel, tree, root=root,\n return_inds=return_inds)\n else:\n # FakeSkeleton, assumed to be single tree\n paths = single_tree_paths(skel, skel, root=root, cv_skel=False,\n return_inds=return_inds)\n return paths\n\n\ndef single_tree_paths(skel, tree, root=None, return_inds=True, cv_skel=True):\n\n if cv_skel:\n tree = tree.consolidate()\n\n if root is not None:\n # check whether this node exists within the tree\n if not isinstance(root, collections.Iterable):\n root = tuple(skel.vertices[root])\n\n tree_lookup = {tuple(v): i for (i, v) in enumerate(tree.vertices)}\n # If it's not in the tree, nullify it\n root = tree_lookup.get(root, None)\n\n edges = tree.edges\n num_nodes = edges.max() + 1\n g = sp.coo_matrix((np.ones(len(edges),), (edges[:, 0], edges[:, 1])),\n shape=(num_nodes, num_nodes))\n\n def dfs_paths(g, root):\n o, preds = csgraph.depth_first_order(g, root, directed=False,\n return_predecessors=True)\n return reconstruct_all_paths(preds)\n\n if root is None:\n init_paths = dfs_paths(g, edges[0, 0])\n root_path = np.argmax([len(p) for p in init_paths])\n root = init_paths[root_path][0]\n\n tree_paths = dfs_paths(g, root)\n path_vertices = [tree.vertices[path] for path in tree_paths]\n\n if return_inds:\n # Have to do this since the consolidated tree inds =/= global inds\n vertex_lookup = {tuple(v): i for (i, v) in enumerate(skel.vertices)}\n return [np.array([vertex_lookup[tuple(v)]\n for v in single_path_vertices])\n for single_path_vertices in path_vertices]\n\n else:\n return path_vertices\n\n\ndef path_distance_transform(path_inds, all_labels):\n\n distances = np.ones(all_labels.shape) * np.inf\n # init seeds to identity except non-zero labels\n seed_inds = np.arange(len(all_labels))\n seed_inds[all_labels != 0] = 0\n\n for path in path_inds:\n path_dists = all_labels[path]\n if np.all(path_dists == 1):\n continue\n dists, inds = distance_transform_edt(path_dists,\n return_distances=True,\n return_indices=True)\n # inds returned as 2d for some reason\n inds = inds[0, :]\n to_change = dists < distances[path]\n inds_to_change = path[to_change]\n\n distances[inds_to_change] = dists[to_change]\n seed_inds[inds_to_change] = path[inds[to_change]]\n\n return distances, seed_inds\n\n\ndef clean_prelim_labels(path_inds, prelim_labels, labels):\n for (i, p) in enumerate(path_inds):\n p_seeds = labels[p]\n p_expanded = prelim_labels[p]\n\n seed_inds = np.nonzero(p_seeds == 0)[0]\n if seed_inds.size == 0:\n continue\n\n if seed_inds[0] != 0:\n spines_before_seed = np.nonzero(p_expanded[:seed_inds[0]])[0]\n if 
spines_before_seed.size > 0:\n last_spine_before_seed = spines_before_seed[-1]\n prelim_labels[p[:last_spine_before_seed]] = 1\n\n if seed_inds[-1] != len(p) - 1:\n spines_after_seed = (np.nonzero(p_expanded[seed_inds[-1]:])[0] +\n seed_inds[-1])\n if spines_after_seed.size > 0:\n first_spine_after_seed = spines_after_seed[0]\n prelim_labels[p[first_spine_after_seed:]] = 1\n\n return prelim_labels\n\n\ndef find_closest_node(skel, pt, kdtree=None):\n\n if kdtree is None:\n kdtree = KDTree(skel.vertices)\n\n return kdtree.query(pt)[1]\n\n\ndef find_containing_paths(path_inds, closest_node, single=False,\n max_len=False, min_len=False):\n included_paths = [inds for inds in path_inds if closest_node in inds]\n\n if single:\n assert len(included_paths) == 1, (f\"Node {closest_node} included\"\n \" in more than one path\")\n return included_paths[0]\n\n if max_len:\n return max(included_paths, key=len)\n\n if min_len:\n return min(included_paths, key=len)\n\n return included_paths\n\n\ndef extract_spine_from_path(path, labels, included_node):\n path_labels = labels[path]\n\n assert labels[included_node] == 1, (f\"Node {included_node} labeled\"\n \" as non-spine\")\n assert included_node in path, f\"Node {included_node} not in path\"\n\n start = np.nonzero(path == included_node)[0][0]\n i = j = start\n while i > 0 and path_labels[i-1] == 1:\n i -= 1\n while j < len(path_labels) and path_labels[j] == 1:\n j += 1\n\n if i > 0 and path_labels[i-1] == 0:\n return path[i:j], path[i]\n if j < len(path_labels) and path_labels[j] == 0:\n return path[i:j], path[j-1]\n # Entire path is included\n return path, path[0]\n\n\ndef write_skel(skel, filename):\n if os.path.isfile(filename):\n os.remove(filename)\n\n with h5py.File(filename) as f:\n f.create_dataset(\"vertices\", data=skel.vertices)\n f.create_dataset(\"edges\", data=skel.edges)\n f.create_dataset(\"radii\", data=skel.radii)\n f.create_dataset(\"vtypes\", data=skel.vertex_types)\n\n\ndef read_skel(filename):\n assert os.path.isfile(filename)\n\n with h5py.File(filename) as f:\n vertices = f[\"vertices\"][()]\n edges = f[\"edges\"][()]\n radii = f[\"radii\"][()]\n vtypes = f[\"vtypes\"][()]\n\n return cv.PrecomputedSkeleton(vertices, edges, radii, vtypes, 99999)\n" ]
[ [ "numpy.hstack", "scipy.sparse.csgraph.depth_first_order", "numpy.nonzero", "scipy.sparse.csgraph.breadth_first_order", "numpy.ones", "numpy.all", "scipy.spatial.KDTree", "numpy.argmin", "numpy.argmax", "numpy.zeros", "numpy.isin", "scipy.ndimage.morphology.distance_transform_edt" ] ]
zakharovas/RecSys2018
[ "f58ed6716213267ad2cde30d1ff677abc5da96ba" ]
[ "recsys/svd_pp/model.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom json import loads\nfrom json import dumps\n\nfrom tqdm import tqdm\nfrom itertools import count\nfrom functools import partial\n\n\n# Model params:\nuser_count = 17259\nitem_count = 2149247\n\nbatch_size = 10000\n\nfactor_dim = 32\n\n# Dataset initialization:\n\ndef generator(filenames):\n for filename in filenames:\n with open(filename, 'r') as source:\n for line in source:\n record = loads(line)\n \n user = record['user']\n view = record['view']\n weight = record['view_count']\n\n fill = view + [item_count] * 256\n fill = fill[:256]\n\n for item in view:\n yield user, item, 1, weight, fill\n\n sample = np.random.randint(0, item_count)\n yield user, sample, 0., weight, fill\n\n\ndataset = tf.data.Dataset.from_generator(\n partial(generator, ['encoded_playlists.json']),\n output_types=(tf.int64, tf.int64, tf.float64, tf.float64, tf.int64)\n)\ndataset = dataset.batch(batch_size)\ndataset = dataset.prefetch(32)\n\n# Variables:\nmean = tf.get_variable(name='mu', shape=[])\n\nuser_bias = tf.get_variable(name='b_u', shape=[user_count])\nitem_bias = tf.get_variable(name='b_i', shape=[item_count])\n\nuser_factor = tf.get_variable(name='p_u', shape=[user_count, factor_dim])\nitem_factor = tf.get_variable(name='q_i', shape=[item_count, factor_dim])\n\nitem_interaction = tf.get_variable(name='y_i', shape=[item_count, factor_dim])\n\n\n# Placeholders:\nusers = tf.placeholder(tf.int64, shape=(None,), name='users')\nitems = tf.placeholder(tf.int64, shape=(None,), name='items')\nrates = tf.placeholder(tf.float64, shape=(None,), name='rates')\nviews = tf.placeholder(tf.int64, shape=(None, None), name='views')\nweights = tf.placeholder(tf.float32, shape=(None,), name='length')\n\n# Formulae:\nu = weights[:, tf.newaxis] * tf.reduce_sum(\n tf.nn.embedding_lookup(\n tf.concat([item_interaction, tf.zeros(shape=[1, factor_dim])], axis=0),\n views\n ),\n axis=1\n) + tf.nn.embedding_lookup(user_factor, users)\ni = tf.nn.embedding_lookup(item_factor, items)\n\nestimate = (\n mean +\n tf.nn.embedding_lookup(item_bias, items) +\n tf.nn.embedding_lookup(user_bias, users) +\n tf.reduce_sum(\n tf.multiply(i, u),\n axis=1\n )\n)\n\nloss = tf.losses.log_loss(\n labels=rates,\n predictions=tf.sigmoid(estimate)\n)\n\n# Optimization:\noptimizer = tf.train.AdamOptimizer()\ntraining = optimizer.minimize(loss)\n\nnx = dataset.make_one_shot_iterator().get_next()\n\nconfig = tf.ConfigProto()\nconfig.intra_op_parallelism_threads = 32\nconfig.inter_op_parallelism_threads = 32\n\nwith tf.Session(config=config) as session:\n session.run(tf.global_variables_initializer())\n\n with tqdm() as widget:\n while True:\n try:\n dataline = session.run(nx)\n\n _, loss_value = session.run(\n (training, loss),\n feed_dict={\n users : dataline[0],\n items : dataline[1],\n rates : dataline[2],\n weights : dataline[3],\n views : dataline[4]\n }\n )\n widget.set_postfix({\n 'Loss on current user': loss_value\n })\n widget.update()\n # except tf.errors.OutOfRangeError:\n except:\n break\n\n user_factor = session.run(user_factor)\n user_bias = session.run(user_bias)\n\n with open('user_encoding.json', 'r') as source:\n with open('user_factor.json', 'w') as target:\n for line in tqdm(source):\n encoding = loads(line)\n target.write(dumps({\n 'name' : encoding['key'],\n 'bias' : user_bias[encoding['val']].tolist(),\n 'factor': user_factor[encoding['val'], :].tolist()\n }))\n target.write('\\n')\n\n item_bias = session.run(item_bias)\n item_factor = session.run(item_factor)\n item_interaction = 
session.run(item_interaction)\n\n with open('item_encoding.json', 'r') as source:\n with open('item_factor.json', 'w') as target:\n for line in tqdm(source):\n encoding = loads(line)\n target.write(dumps({\n 'id' : encoding['key'],\n 'bias' : item_bias[encoding['val']].tolist(),\n 'factor': item_factor[encoding['val'], :].tolist(),\n 'inner' : item_interaction[encoding['val'], :].tolist()\n }))\n target.write('\\n')\n" ]
[ [ "tensorflow.get_variable", "tensorflow.multiply", "tensorflow.zeros", "tensorflow.sigmoid", "tensorflow.placeholder", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.nn.embedding_lookup", "numpy.random.randint" ] ]
ndow33/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "14cfe850e68e134057fe56c1fea99c32c4473859" ]
[ "module2-sql-for-analysis/titanic.py" ]
[ "import os\nimport psycopg2\nfrom psycopg2.extras import DictCursor\nfrom psycopg2.extras import execute_values\nfrom dotenv import load_dotenv\nimport pandas as pd\nimport json\nimport numpy as np\n\nload_dotenv() # reads the contents of the .env file and adds them to the environment\n\npsycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)\n\nDB_NAME = os.getenv(\"DB_NAME\", default = \"Set the DB_NAME\")\nDB_USER = os.getenv(\"DB_USER\", default = \"Set the DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\", default = \"Set the DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\", default = \"Set the DB_HOST\")\n\n### Connect to ElephantSQL-hosted PostgreSQL\nconnection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST) \nprint(\"CONNECTION\", type(connection))\n\n### A \"cursor\", a structure to iterate over db records to perform queries\ncursor = connection.cursor(cursor_factory=DictCursor)\nprint(\"CURSOR\", type(cursor))\n\n### An example query\ncursor.execute('SELECT * from test_table;')\n\n### Note - nothing happened yet! We need to actually *fetch* from the cursor\nresult = cursor.fetchall()\nfor row in result:\n print(\"--------\")\n print(type(row))\n print(row)\n\n# Read passenger data from the csv file\nCSV_FILEPATH = os.path.join(os.path.dirname(__file__), \"titanic.csv\")\n\ndf = pd.read_csv(CSV_FILEPATH)\nprint(df.head())\n\n\n# Create a table to store the passengers\ntable_name = \"passengers\"\n\nprint(\"-------------------\")\ntable_creation = f\"\"\"\nCREATE TABLE IF NOT EXISTS {table_name} (\n id SERIAL PRIMARY KEY,\n \"survived\" int4,\n \"pclass\" int4,\n \"name\" text,\n \"sex\" text,\n \"age\" int4,\n \"sib_spouse_count\" int4,\n \"parent_child_count\" int4,\n \"fare\" float8\n);\n\"\"\"\n\n\n\nprint(\"SQL:\", table_creation)\ncursor.execute(table_creation)\n\n# breakpoint()\n\n# Insert Data into the passengers table\n \nlist_of_tuples = list(df.to_records(index=False))\ninsertion_query = f\"INSERT INTO {table_name} (survived, pclass, name, sex, age, sib_spouse_count, parent_child_count, fare) VALUES %s\"\nexecute_values(cursor, insertion_query, list_of_tuples)\n\n# ACTUALLY SAVE THE TRANSACTIONS\n\nconnection.commit()\ncursor.close()\nconnection.close()\n" ]
[ [ "pandas.read_csv" ] ]
Noonewin/ML_scripts
[ "a7019a2c8f138f3622979afd05d435dbeb319f15" ]
[ "clustering/clustering.py" ]
[ "import os\nimport sys\nimport scipy as sp\nfrom matplotlib.mlab import dist\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer = CountVectorizer(min_df=1)\n\n# content = [\"How to format my hard disk\", \" Hard disk format problems \"]\n# X = vectorizer.fit_transform(content)\n# print(vectorizer.get_feature_names())\n# print(X.toarray().transpose())\n\nDIR = \"./data\"\nposts = [open(os.path.join(DIR, f)).read() for f in os.listdir(DIR)]\nprint(posts)\nX_train = vectorizer.fit_transform(posts)\nnum_samples, num_features = X_train.shape\nprint(\"#samples: %d, #features: %d\" % (num_samples, num_features))\nprint(vectorizer.get_feature_names())\n\nnew_post = \"imaging databases\"\nnew_post_vec = vectorizer.transform([new_post])\n\n\ndef dist_raw(v1, v2):\n delta = v1 - v2\n return sp.linalg.norm(delta.toarray())\n\n\nbest_doc = None\nbest_dist = sys.maxsize\nbest_i = None\nfor i in range(0, num_samples):\n post = posts[i]\n if post == new_post:\n continue\n\npost_vec = X_train.getrow(i)\n\nd = dist(post_vec, new_post_vec)\n\nprint(\"=== Post %i with dist=%.2f: %s\" % (i, d, post))\n\nif d < best_dist:\n best_dist = d\n best_i = i\n print(\"Best post is %i with dist=%.2f\" % (best_i, best_dist))\n" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "matplotlib.mlab.dist" ] ]
milwaukee-city-data/budget
[ "1e07b72d0d58abf2e2dcef875d52e1a2af07b252" ]
[ "carceral/trend_with_rate.py" ]
[ "import csv\nimport os\n\nfrom scipy.signal import savgol_filter\n\nfrom gwpy.time import to_gps\nfrom gwpy.timeseries import TimeSeries\n\nfrom matplotlib import use\nuse(\"Agg\")\n\nfrom matplotlib import font_manager, pyplot, rcParams\n\n\n# set font properties\nfont_dir = os.path.join(os.environ[\"HOME\"], \"Downloads\", \"vollkorn\")\nfor font in font_manager.findSystemFonts(font_dir):\n font_manager.fontManager.addfont(font)\n\n# set font family globally\nrcParams[\"font.family\"] = \"vollkorn\"\n\n\n# -- plotting utilities -----------------------------------------------\n\n\ndef plot_incarcerated_total(data):\n \"\"\"Parse input and plot trends for the incarcerated population\"\"\"\n times = [float(to_gps(t)) for t in data[\"publication_date\"]]\n trends = TimeSeries(data[\"inmate_total\"], times=times) / 1000\n rate = type(trends)(\n savgol_filter(trends.value, 15, 2, deriv=1)\n ) * 1000\n rate.__array_finalize__(trends)\n xticks = [str(n) for n in range(2018, 2022)]\n\n # stand up axes\n (fig, (tax, rax)) = pyplot.subplots(\n nrows=2,\n sharex=True,\n sharey=False,\n figsize=(12, 6),\n )\n\n # plot total population\n tax.plot(trends, color=\"#0d2240\", linewidth=2)\n tax.plot(\n [float(to_gps(\"2019-01-07\"))] * 2,\n [19, 24],\n color=\"#0d2240\",\n alpha=0.6,\n linestyle=\"--\",\n linewidth=1,\n )\n tax.plot(\n [float(to_gps(\"2020-03-25\"))] * 2,\n [19, 24],\n color=\"#0d2240\",\n alpha=0.6,\n linestyle=\"--\",\n linewidth=1,\n )\n tax.set_xlim(\n [\n float(to_gps(\"2018-01-01\")),\n float(to_gps(\"2021-08-06\")),\n ]\n )\n tax.set_xticks(\n [float(to_gps(f\"{yr}-{mo}-01\"))\n for yr in xticks for mo in range(2, 13)\n if to_gps(f\"{yr}-{mo}-01\") < to_gps(\"2021-08-06\")],\n minor=True,\n )\n tax.set_xticks([float(to_gps(f\"{yr}-01-01\")) for yr in xticks])\n tax.set_xticklabels(xticks)\n tax.set_ylabel(r\"Total population ($\\times$ 1000)\")\n tax.set_ylim([19, 24])\n tax.text(\n float(to_gps(\"2019-01-21\")),\n 21.15,\n \"Evers administration\\nbegins\",\n )\n tax.text(\n float(to_gps(\"2020-04-08\")),\n 19.65,\n \"COVID-19 lockdown\\nbegins\",\n )\n tax.grid(color=\"#0d2240\", alpha=0.4, linestyle=\"dotted\")\n\n # plot rate of change\n rax.plot(rate, color=\"#00a8e1\", linewidth=2)\n rax.plot(\n [float(to_gps(\"2019-01-07\"))] * 2,\n [-160, 60],\n color=\"#0d2240\",\n alpha=0.6,\n linestyle=\"--\",\n linewidth=1,\n )\n rax.plot(\n [float(to_gps(\"2020-03-25\"))] * 2,\n [-160, 60],\n color=\"#0d2240\",\n alpha=0.6,\n linestyle=\"--\",\n linewidth=1,\n )\n rax.set_xlabel(\"Calendar year\")\n rax.set_ylabel(\"Growth rate (per week)\")\n rax.set_ylim([-160, 60])\n rax.grid(color=\"#0d2240\", alpha=0.4, linestyle=\"dotted\")\n\n # save figure and return\n return fig.savefig(\"doc-total-percent.png\", bbox_inches=\"tight\")\n\n\n# -- main block -------------------------------------------------------\n\nif __name__ == \"__main__\":\n # load from CSV\n with open(\"doc-population-trends.csv\", \"r\") as datafile:\n data = {\n col[0]: [\n float(value)\n if value.isnumeric()\n else value\n for value in col[1:]\n ]\n for col in list(map(list, zip(*csv.reader(datafile))))\n }\n\n # render population trends as timeseries figures\n plot_incarcerated_total(data)\n" ]
[ [ "matplotlib.font_manager.fontManager.addfont", "matplotlib.use", "matplotlib.pyplot.subplots", "scipy.signal.savgol_filter", "matplotlib.font_manager.findSystemFonts" ] ]
ogierpaul/suricate
[ "fd43627e5d2a92fe4bf7b562f65ab89ec07ee49c" ]
[ "tests/pipeindexer/test_pruningpipe.py" ]
[ "from suricate.pipeline.pruningpipe import PruningPipe\nfrom suricate.data.companies import getXst, getytrue\nfrom suricate.explore import Explorer, KBinsCluster\nfrom suricate.dftransformers import DfConnector, VectorizerConnector, ExactConnector\nfrom suricate.sbstransformers import SbsApplyComparator\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.metrics import precision_score, recall_score, balanced_accuracy_score\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom suricate.preutils import createmultiindex\n\n# ESCONNECTOR\nfrom suricate.dbconnectors import EsConnector\nimport elasticsearch\nfrom suricate.metrics.metrics import get_commonscores\n\n_lr_score_list = [\n ('name_vecword', VectorizerConnector(on='name', analyzer='word', ngram_range=(1, 2))),\n ('street_vecword', VectorizerConnector(on='street', analyzer='word', ngram_range=(1, 2))),\n ('city_vecchar', VectorizerConnector(on='city', analyzer='char', ngram_range=(1, 3))),\n ('countrycode_exact', ExactConnector(on='countrycode')),\n ('duns_exact', ExactConnector(on='duns')),\n ('postalcode_exact', ExactConnector(on='postalcode'))\n\n]\n_lr_score_cols = [c[0] for c in _lr_score_list]\n_sbs_score_list = [\n ('name_fuzzy', SbsApplyComparator(on='name', comparator='simple')),\n ('street_fuzzy', SbsApplyComparator(on='street', comparator='simple')),\n ('name_token', SbsApplyComparator(on='name', comparator='token')),\n ('street_token', SbsApplyComparator(on='street', comparator='token')),\n ('city_fuzzy', SbsApplyComparator(on='city', comparator='simple')),\n ('postalcode_fuzzy', SbsApplyComparator(on='postalcode', comparator='simple')),\n ('postalcode_contains', SbsApplyComparator(on='postalcode', comparator='contains')),\n]\n\ndef test_pruningpipe():\n print('start', pd.datetime.now())\n n_rows = 500\n n_cluster = 25\n n_simplequestions = 50\n n_pointedquestions = 50\n Xst = getXst(nrows=n_rows)\n ixc = createmultiindex(X=Xst)\n y_true = getytrue()\n y_true = y_true.loc[ixc]\n print(pd.datetime.now(), 'data loaded')\n pipe = PruningPipe(\n connector=DfConnector(\n scorer=Pipeline(steps=[\n ('scores', FeatureUnion(_lr_score_list)),\n ('imputer', SimpleImputer(strategy='constant', fill_value=0))]\n )\n ),\n pruningclf=Explorer(clustermixin=KBinsCluster(n_clusters=n_cluster)),\n sbsmodel=FeatureUnion(transformer_list=_sbs_score_list),\n classifier=LogisticRegressionCV()\n )\n pipe.fit(X=Xst, y=y_true)\n y_pred = pipe.predict(X=Xst)\n precision = precision_score(y_true=y_true, y_pred=y_pred)\n recall = recall_score(y_true=y_true, y_pred=y_pred)\n accuracy = balanced_accuracy_score(y_true=y_true, y_pred=y_pred)\n print('***\\nscores:\\n')\n print('precision score:{}\\n recall score:{}\\n balanced accuracy score:{}'.format(\n precision, recall, accuracy))\n\n\ndef test_esconnector():\n print('start', pd.datetime.now())\n n_rows = 500\n n_cluster = 25\n Xst = getXst(nrows=n_rows)\n left = Xst[0]\n esclient = elasticsearch.Elasticsearch()\n scoreplan = {\n 'name': {\n 'type': 'FreeText'\n },\n 'street': {\n 'type': 'FreeText'\n },\n 'city': {\n 'type': 'FreeText'\n },\n 'duns': {\n 'type': 'Exact'\n },\n 'postalcode': {\n 'type': 'FreeText'\n },\n 'countrycode': {\n 'type': 'Exact'\n }\n }\n escon = EsConnector(\n client=esclient,\n scoreplan=scoreplan,\n index=\"right\",\n explain=False,\n size=20\n )\n ixc = createmultiindex(X=Xst)\n y_true = getytrue()\n y_true = y_true.loc[ixc]\n print(pd.datetime.now(), 'data loaded')\n pipe = 
PruningPipe(\n connector=escon,\n pruningclf=Explorer(clustermixin=KBinsCluster(n_clusters=n_cluster)),\n sbsmodel=FeatureUnion(transformer_list=_sbs_score_list),\n classifier=LogisticRegressionCV()\n )\n pipe.fit(X=left, y=y_true)\n y_pred = pipe.predict(X=left)\n scores = get_commonscores(y_pred=y_pred, y_true=y_true)\n precision = scores['precision']\n recall = scores['recall']\n accuracy = scores['balanced_accuracy']\n print('***\\nscores:\\n')\n print('precision score:{}\\n recall score:{}\\n balanced accuracy score:{}'.format(\n precision, recall, accuracy))\n" ]
[ [ "sklearn.pipeline.FeatureUnion", "sklearn.linear_model.LogisticRegressionCV", "sklearn.metrics.balanced_accuracy_score", "sklearn.metrics.precision_score", "sklearn.impute.SimpleImputer", "pandas.datetime.now", "sklearn.metrics.recall_score" ] ]
Titousensei/pytext-1
[ "6ea5ce52f5070fc10ac20732f994296c4d445207" ]
[ "pytext/models/representations/pure_doc_attention.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Any, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom pytext.config import ConfigBase\nfrom pytext.models.decoders.mlp_decoder import MLPDecoder\nfrom pytext.models.module import create_module\n\nfrom .pooling import BoundaryPool, MaxPool, MeanPool, NoPool, SelfAttention\nfrom .representation_base import RepresentationBase\n\n\nclass PureDocAttention(RepresentationBase):\n \"\"\"pooling (e.g. max pooling or self attention)\n followed by optional MLP\"\"\"\n\n class Config(RepresentationBase.Config):\n dropout: float = 0.4\n pooling: Union[\n SelfAttention.Config,\n MaxPool.Config,\n MeanPool.Config,\n NoPool.Config,\n BoundaryPool.Config,\n ] = SelfAttention.Config()\n mlp_decoder: Optional[MLPDecoder.Config] = None\n\n def __init__(self, config: Config, embed_dim: int) -> None:\n \"\"\"embed_dim is the dimension of embedded_tokens\n \"\"\"\n super().__init__(config)\n\n self.dropout = nn.Dropout(config.dropout)\n\n # Document attention.\n self.attention = (\n create_module(config.pooling, n_input=embed_dim)\n if config.pooling is not None\n else None\n )\n\n # Non-linear projection over attended representation.\n self.dense = None\n if (\n isinstance(config.pooling, BoundaryPool.Config)\n and config.pooling.boundary_type == \"firstlast\"\n ):\n # the dimension double because of concatenating bos and eos\n self.representation_dim = embed_dim * 2\n else:\n self.representation_dim = embed_dim\n\n if config.mlp_decoder:\n self.dense = MLPDecoder(config.mlp_decoder, in_dim=embed_dim)\n self.representation_dim = self.dense.out_dim\n\n def forward(\n self, embedded_tokens: torch.Tensor, seq_lengths: torch.Tensor = None, *args\n ) -> Any:\n rep = self.dropout(embedded_tokens)\n\n # Attention\n if self.attention:\n rep = self.attention(rep, seq_lengths)\n\n # Non-linear projection\n if self.dense:\n rep = self.dense(rep)\n\n return rep\n" ]
[ [ "torch.nn.Dropout" ] ]
LeonOtis/pyscf
[ "98ba8106396ac4c90dc65207059773ce048b0ebf", "98ba8106396ac4c90dc65207059773ce048b0ebf" ]
[ "pyscf/scf/test/test_uhf.py", "pyscf/dft/r_numint.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport copy\nimport numpy\nimport unittest\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf import scf\n\nscf.uhf.BREAKSYM = True\n\nmol = gto.M(\n verbose = 7,\n output = '/dev/null',\n atom = '''\nO 0 0 0\nH 0 -0.757 0.587\nH 0 0.757 0.587''',\n basis = 'cc-pvdz',\n)\n\nmf = scf.UHF(mol)\nmf.conv_tol = 1e-14\nmf.scf()\n\nmol2 = gto.M(\n verbose = 7,\n output = '/dev/null',\n atom = '''\nO 0 0 0\nH 0 -0.757 0.587\nH 0 0.757 0.587''',\n basis = 'cc-pvdz',\n spin = 2,\n)\nmf2 = scf.UHF(mol2).run(conv_tol=1e-10)\n\nn2sym = gto.M(\n verbose = 7,\n output = '/dev/null',\n atom = '''\n N 0 0 0\n N 0 0 1''',\n symmetry = 1,\n basis = 'cc-pvdz')\nn2mf = scf.UHF(n2sym).set(conv_tol=1e-10).run()\n\ndef tearDownModule():\n global mol, mf, n2sym, n2mf, mol2, mf2\n mol.stdout.close()\n mol2.stdout.close()\n n2sym.stdout.close()\n del mol, mf, n2sym, n2mf, mol2, mf2\n\nclass KnownValues(unittest.TestCase):\n def test_init_guess_minao(self):\n dm1 = mf.init_guess_by_minao(mol, breaksym=False)\n self.assertAlmostEqual(abs(dm1).sum(), 13.649710173723337, 9)\n dm2 = scf.uhf.get_init_guess(mol, key='minao')\n self.assertAlmostEqual(abs(dm2).sum(), 12.913908927027279, 9)\n\n def test_init_guess_1e(self):\n dm1 = scf.uhf.init_guess_by_1e(mol, breaksym=False)\n self.assertAlmostEqual(lib.finger(dm1), -0.17065579929349839, 9)\n dm2 = scf.uhf.get_init_guess(mol, key='hcore')\n self.assertAlmostEqual(lib.finger(dm2), 0.69685247431623965, 9)\n self.assertAlmostEqual(abs(dm1[0]-dm2[0]).max(), 0, 9)\n\n def test_init_guess_atom(self):\n dm1 = mf.init_guess_by_atom(mol, breaksym=False)\n self.assertAlmostEqual(lib.finger(dm1), 0.049712575204034937, 9)\n dm2 = scf.uhf.get_init_guess(mol, key='atom')\n self.assertAlmostEqual(lib.finger(dm2), 0.053542055104367631, 9)\n self.assertAlmostEqual(abs(dm1[1]-dm2[1]).max(), 0, 9)\n\n def test_init_guess_huckel(self):\n dm = scf.uhf.UHF(mol).get_init_guess(mol, key='huckel')\n self.assertAlmostEqual(lib.finger(dm), 0.640409942511017, 9)\n\n def test_1e(self):\n mf = scf.UHF(gto.M(atom='H', spin=1))\n self.assertAlmostEqual(mf.kernel(), -0.46658184955727555, 9)\n mf = scf.UHF(gto.M(atom='H', spin=1, symmetry=1))\n self.assertAlmostEqual(mf.kernel(), -0.46658184955727555, 9)\n\n def test_get_grad(self):\n g = mf2.get_grad(mf2.mo_coeff, mf2.mo_occ)\n self.assertAlmostEqual(abs(g).max(), 0, 6)\n\n def test_energy_tot(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n dm = (numpy.random.random((nao,nao)),\n numpy.random.random((nao,nao)))\n e = mf.energy_elec(dm)[0]\n self.assertAlmostEqual(e, 57.122667754846844, 9)\n\n def test_mulliken_pop(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n dm = (numpy.random.random((nao,nao)),\n numpy.random.random((nao,nao)))\n pop, chg = mf.mulliken_pop(mol, dm)\n self.assertAlmostEqual(numpy.linalg.norm(pop), 8.3342045408596057, 9)\n pop, chg = 
mf.mulliken_pop_meta_lowdin_ao(mol, dm, pre_orth_method='ano')\n self.assertAlmostEqual(numpy.linalg.norm(pop), 12.322626374896178, 9)\n\n def test_scf(self):\n self.assertAlmostEqual(mf.e_tot, -76.026765673119627, 9)\n\n def test_nr_uhf_cart(self):\n pmol = mol.copy()\n pmol.cart = True\n mf = scf.UHF(pmol).run()\n self.assertAlmostEqual(mf.e_tot, -76.027107008870573, 9)\n\n def test_nr_uhf_symm_cart(self):\n pmol = mol.copy()\n pmol.cart = True\n pmol.symmetry = 1\n pmol.build()\n mf = scf.UHF(pmol).run()\n self.assertAlmostEqual(mf.e_tot, -76.027107008870573, 9)\n\n def test_spin_square(self):\n self.assertAlmostEqual(mf.spin_square(mf.mo_coeff)[0], 0, 9)\n\n def test_uhf_symm(self):\n pmol = mol.copy()\n pmol.symmetry = 1\n pmol.build(False, False)\n mf = scf.uhf_symm.UHF(pmol)\n self.assertAlmostEqual(mf.scf(), -76.026765673119627, 9)\n\n def test_uhf_symm_fixnocc(self):\n pmol = mol.copy()\n pmol.charge = 1\n pmol.spin = 1\n pmol.symmetry = 1\n pmol.build(False, False)\n mf = scf.uhf_symm.UHF(pmol)\n mf.irrep_nelec = {'B1':(2,1)}\n self.assertAlmostEqual(mf.scf(), -75.010623169610966, 9)\n\n def test_n2_symm(self):\n mf = scf.uhf_symm.UHF(n2sym)\n self.assertAlmostEqual(mf.scf(), -108.9298383856092, 9)\n\n pmol = n2sym.copy()\n pmol.charge = 1\n pmol.spin = 1\n mf = scf.uhf_symm.UHF(pmol)\n self.assertAlmostEqual(mf.scf(), -108.34691774091894, 9)\n\n def test_n2_symm_uhf_fixnocc(self):\n pmol = n2sym.copy()\n pmol.charge = 1\n pmol.spin = 1\n mf = scf.uhf_symm.UHF(pmol)\n mf.irrep_nelec = {'A1g':6, 'A1u':3, 'E1ux':2, 'E1uy':2}\n self.assertAlmostEqual(mf.scf(), -108.22558478425401, 9)\n mf.irrep_nelec = {'A1g':(3,3), 'A1u':(2,1), 'E1ux':(1,1), 'E1uy':(1,1)}\n self.assertAlmostEqual(mf.scf(), -108.22558478425401, 9)\n\n def test_uhf_get_occ(self):\n mol = gto.M(verbose=7, output='/dev/null').set(nelectron=8, spin=2)\n mf = scf.uhf.UHF(mol)\n energy = numpy.array(([-10, -1, 1, -2, 0, -3], [8, 2, 4, 3, 0, 5]))\n self.assertTrue(numpy.allclose(mf.get_occ(energy),\n ([1, 1, 0, 1, 1, 1], [0, 1, 0, 1, 1, 0])))\n pmol = n2sym.copy()\n pmol.spin = 2\n pmol.symmetry = False\n mf = scf.UHF(pmol).set(verbose = 0)\n energy = numpy.array([[34, 2 , 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51, 18, 78, 85, 49, 84, 7],\n [29, 26, 13, 54, 18, 78, 85, 49, 84, 62, 42, 74, 20, 61, 51, 34, 2 , 33, 52, 3]])\n self.assertTrue(numpy.allclose(mf.get_occ(energy),\n [[0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1]]))\n mol.stdout.close()\n\n def test_uhf_symm_get_occ(self):\n pmol = n2sym.copy()\n pmol.spin = 2\n mf = scf.UHF(pmol).set(verbose = 0)\n orbsym = numpy.array([[0 , 5 , 0 , 5 , 6 , 7 , 0 , 2 , 3 , 5 , 0 , 6 , 7 , 0 , 2 , 3 , 5 , 10, 11, 5],\n [5 , 0 , 6 , 7 , 5 , 10, 11, 0 , 5 , 0 , 5 , 5 , 6 , 7 , 0 , 2 , 3 , 0 , 2 , 3]])\n energy = numpy.array([[34, 2 , 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51, 18, 78, 85, 49, 84, 7],\n [29, 26, 13, 54, 18, 78, 85, 49, 84, 62, 42, 74, 20, 61, 51, 34, 2 , 33, 52, 3]])\n mf.irrep_nelec = {'A1g':7, 'A1u':3, 'E1ux':2, 'E1uy':2}\n mo_coeff = lib.tag_array([numpy.eye(energy.size)]*2, orbsym=orbsym)\n self.assertTrue(numpy.allclose(mf.get_occ(energy, mo_coeff),\n [[1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]))\n mf.irrep_nelec = {'A1g':(5,2), 'A1u':(1,2)}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, mo_coeff),\n [[1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1]]))\n mf.irrep_nelec = {'E1ux':2, 'E1uy':2}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, mo_coeff),\n [[0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1]]))\n mf.irrep_nelec = {}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, mo_coeff),\n [[0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1]]))\n\n def test_uhf_symm_dump_flags(self):\n pmol = n2sym.copy()\n pmol.spin = 2\n mf = scf.UHF(pmol).set(verbose = 0)\n mf.irrep_nelec = {'A1g':6, 'A1u':4, 'E1ux':2, 'E1uy':2}\n self.assertRaises(ValueError, mf.build)\n\n def test_det_ovlp(self):\n s, x = mf.det_ovlp(mf.mo_coeff, mf.mo_coeff, mf.mo_occ, mf.mo_occ)\n self.assertAlmostEqual(s, 1.000000000, 9)\n self.assertAlmostEqual(numpy.trace(x[0]), mol.nelec[0]*1.000000000, 9)\n self.assertAlmostEqual(numpy.trace(x[0]), mol.nelec[1]*1.000000000, 9)\n\n def test_dip_moment(self):\n dip = mf.dip_moment(unit='au')\n self.assertTrue(numpy.allclose(dip, [0.00000, 0.00000, 0.80985]))\n\n def test_get_wfnsym(self):\n self.assertEqual(n2mf.wfnsym, 0)\n\n pmol = n2sym.copy()\n pmol.spin = 2\n mf = scf.UHF(pmol).set(verbose = 0).run()\n self.assertTrue(mf.wfnsym in (2, 3))\n\n def test_complex_orbitals(self):\n nao = mol.nao_nr()\n mf = scf.UHF(mol)\n mf.kernel(numpy.zeros((2,nao,nao))*0j)\n self.assertAlmostEqual(mf.e_tot, -76.026765673119627, 9)\n\n mf = scf.UHF(mol).set(max_memory=0)\n mf.kernel(numpy.zeros((2,nao,nao))*0j)\n self.assertAlmostEqual(mf.e_tot, -76.026765673119627, 9)\n\n def test_make_asm_dm(self):\n mo_ba = (mf2.mo_coeff[1], mf2.mo_coeff[0])\n mo_occ = mf.mo_occ\n det, x = mf2.det_ovlp(mf2.mo_coeff, mo_ba, mo_occ, mo_occ)\n s = mf2.get_ovlp()\n self.assertAlmostEqual(det, 0.95208556738844452, 6)\n dm = mf2.make_asym_dm(mf2.mo_coeff, mo_ba, mo_occ, mo_occ, x)\n self.assertAlmostEqual(numpy.einsum('ij,ji', s, dm[0]), 5, 9)\n self.assertAlmostEqual(numpy.einsum('ij,ji', s, dm[1]), 5, 9)\n\n def test_analyze(self):\n (pop, chg), dip = mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop[0]+pop[1]), 4.0049440587033116, 6)\n self.assertAlmostEqual(numpy.linalg.norm(dip), 2.05844441822, 5)\n (pop, chg), dip = mf.analyze(with_meta_lowdin=False)\n self.assertAlmostEqual(numpy.linalg.norm(pop[0]+pop[1]), 3.2031790129016922, 6)\n\n mf1 = copy.copy(n2mf)\n (pop, chg), dip = n2mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop[0]+pop[1]), 4.5467414321488357, 6)\n self.assertAlmostEqual(numpy.linalg.norm(dip), 0, 9)\n mf1.mo_coeff = numpy.array(n2mf.mo_coeff)\n (pop, chg), dip = mf1.analyze(with_meta_lowdin=False)\n self.assertAlmostEqual(numpy.linalg.norm(pop[0]+pop[1]), 3.8893148995392353, 6)\n\n def test_get_veff(self):\n nao = mol.nao_nr()\n numpy.random.seed(1)\n d1 = numpy.random.random((nao,nao))\n d2 = numpy.random.random((nao,nao))\n d = (d1+d1.T, d2+d2.T)\n v = scf.uhf.get_veff(mol, d)\n self.assertAlmostEqual(numpy.linalg.norm(v), 398.09239104094513, 9)\n\n pmol = gto.Mole()\n pmol.atom = '''\nO 0 0 0\nH 0 -0.757 0.587\nH 0 0.757 0.587'''\n pmol.basis = '6-31g'\n pmol.cart = True\n\n mf1 = scf.uhf.UHF(pmol)\n mf1.direct_scf = True\n mf1.max_memory = 0\n nao = pmol.nao_nr()\n numpy.random.seed(1)\n dm = numpy.random.random((2,3,nao,nao)) - .5 + 0j\n vhf3a = mf1.get_veff(pmol, dm[:,0], hermi=0)\n vhf3b = mf1.get_veff(pmol, dm[:,1], hermi=0)\n vhf3c = mf1.get_veff(pmol, dm[:,2], hermi=0)\n vhf3 = numpy.array((vhf3a, vhf3b, 
vhf3c)).transpose(1,0,2,3)\n\n vhf4 = mf1.get_veff(pmol, dm, hermi=0)\n self.assertEqual(vhf4.ndim, 4)\n self.assertAlmostEqual(abs(vhf4-vhf3).max(), 0, 12)\n self.assertAlmostEqual(lib.finger(vhf4), -9.9614575705134953, 12)\n\n def test_natm_eq_0(self):\n mol = gto.M()\n mol.spin = 2\n mol.nelectron = 2\n mf = scf.UHF(mol)\n mf.get_hcore = lambda *args: numpy.diag(numpy.arange(3))\n mf.get_ovlp = lambda *args: numpy.eye(3)\n mf._eri = numpy.zeros((3,3,3,3))\n for i in range(3):\n mf._eri[i,i,i,i] = .2\n dm = mf.get_init_guess(mol, key='hcore')\n self.assertTrue(numpy.allclose(dm[0].diagonal(), [1,1,0]))\n mf.kernel()\n self.assertAlmostEqual(mf.e_tot, 1.0, 9)\n\n def test_canonicalize(self):\n mo_coeff = numpy.array(n2mf.mo_coeff)\n e, c = n2mf.canonicalize(mo_coeff, n2mf.mo_occ)\n self.assertAlmostEqual(abs(e - n2mf.mo_energy).max(), 0, 6)\n\n n2_uhf = n2mf.view(scf.uhf.UHF)\n e, c = n2_uhf.canonicalize(n2mf.mo_coeff, n2mf.mo_occ)\n self.assertAlmostEqual(abs(e - n2mf.mo_energy).max(), 0, 6)\n\n def test_energy_tot(self):\n e = n2mf.energy_tot(n2mf.make_rdm1())\n self.assertAlmostEqual(e, n2mf.e_tot, 9)\n\n def test_get_occ_extreme_case(self):\n mol = gto.M(atom='He', verbose=7, output='/dev/null')\n mf = scf.UHF(mol).run()\n self.assertAlmostEqual(mf.e_tot, -2.8077839575399737, 12)\n\n mol.charge = 2\n mf = scf.UHF(mol).run()\n self.assertAlmostEqual(mf.e_tot, 0, 12)\n mol.stdout.close()\n\n def test_damping(self):\n nao = mol.nao_nr()\n numpy.random.seed(1)\n s = scf.hf.get_ovlp(mol)\n d = numpy.random.random((nao,nao))\n d = (d + d.T) * 2\n vhf = 0\n f = scf.uhf.get_fock(mf, scf.hf.get_hcore(mol), s, vhf, d, cycle=0,\n diis_start_cycle=2, damp_factor=0.5)\n self.assertAlmostEqual(numpy.linalg.norm(f[0]), 23361.854064083178, 9)\n self.assertAlmostEqual(numpy.linalg.norm(f[1]), 23361.854064083178, 9)\n\n def test_get_irrep_nelec(self):\n fock = n2mf.get_fock()\n s1e = n2mf.get_ovlp()\n e, c = n2mf.eig(fock, s1e)\n mo_occ = n2mf.get_occ(e, c)\n n2_uhf = n2mf.view(scf.uhf_symm.UHF)\n irrep_nelec = n2_uhf.get_irrep_nelec(n2sym, c, mo_occ)\n self.assertEqual(irrep_nelec['A1u'], (2,2))\n self.assertEqual(irrep_nelec['A1g'], (3,3))\n self.assertEqual(irrep_nelec['E1ux'], (1,1))\n self.assertEqual(irrep_nelec['E1uy'], (1,1))\n mo_coeff = numpy.array(c)\n irrep_nelec = n2_uhf.get_irrep_nelec(n2sym, mo_coeff, mo_occ)\n self.assertEqual(irrep_nelec['A1u'], (2,2))\n self.assertEqual(irrep_nelec['A1g'], (3,3))\n self.assertEqual(irrep_nelec['E1ux'], (1,1))\n self.assertEqual(irrep_nelec['E1uy'], (1,1))\n\n n2_uhf.irrep_nelec = irrep_nelec\n n2_uhf.irrep_nelec['A2g'] = 0\n n2_uhf.nelec = (8,6)\n self.assertRaises(ValueError, n2_uhf.build)\n n2_uhf.irrep_nelec['A1g'] = (2,2)\n n2_uhf.irrep_nelec['E2gx'] = 0\n n2_uhf.irrep_nelec['E2gy'] = 0\n n2_uhf.irrep_nelec['E2ux'] = 0\n n2_uhf.irrep_nelec['E2uy'] = 0\n self.assertRaises(ValueError, n2_uhf.build)\n n2_uhf.irrep_nelec['A1g'] = (4,2)\n self.assertRaises(ValueError, n2_uhf.build)\n n2_uhf.irrep_nelec['A1g'] = (0,2)\n self.assertRaises(ValueError, n2_uhf.build)\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests for uhf\")\n unittest.main()\n\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport ctypes\nimport numpy\nimport scipy.linalg\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.dft import numint\nfrom pyscf.dft.numint import _dot_ao_dm, _dot_ao_ao, BLKSIZE\n\n\ndef eval_ao(mol, coords, deriv=0, with_s=True, shls_slice=None,\n non0tab=None, out=None, verbose=None):\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n feval = 'GTOval_spinor_deriv%d' % deriv\n aoLa, aoLb = mol.eval_gto(feval, coords, comp, shls_slice, non0tab, out=out)\n if with_s:\n assert(deriv <= 1) # only GTOval_ipsp_spinor\n ngrid, nao = aoLa.shape[-2:]\n if out is not None:\n aoSa = numpy.empty((comp,nao,ngrid), dtype=numpy.complex128)\n aoSb = numpy.empty((comp,nao,ngrid), dtype=numpy.complex128)\n else:\n out = numpy.ndarray((4,comp,nao,ngrid), dtype=numpy.complex128, buffer=out)\n aoSa, aoSb = out[2:]\n comp = 1\n ao = mol.eval_gto('GTOval_sp_spinor', coords, comp, shls_slice, non0tab)\n aoSa[0] = ao[0].T\n aoSb[0] = ao[1].T\n fevals = ['GTOval_sp_spinor', 'GTOval_ipsp_spinor']\n p1 = 1\n for n in range(1, deriv+1):\n comp = (n+1)*(n+2)//2\n ao = mol.eval_gto(fevals[n], coords, comp, shls_slice, non0tab)\n p0, p1 = p1, p1 + comp\n for k in range(comp):\n aoSa[p0:p1] = ao[0].transpose(0,2,1)\n aoSb[p0:p1] = ao[1].transpose(0,2,1)\n aoSa = aoSa.transpose(0,2,1)\n aoSb = aoSb.transpose(0,2,1)\n if deriv == 0:\n aoSa = aoSa[0]\n aoSb = aoSb[0]\n return aoLa, aoLb, aoSa, aoSb\n else:\n return aoLa, aoLb\n\ndef _dm2c_to_rho2x2(mol, ao, dm, non0tab, shls_slice, ao_loc, out=None):\n aoa, aob = ao\n out = _dot_ao_dm(mol, aoa, dm, non0tab, shls_slice, ao_loc, out=out)\n rhoaa = numpy.einsum('pi,pi->p', aoa.real, out.real)\n rhoaa+= numpy.einsum('pi,pi->p', aoa.imag, out.imag)\n rhoba = numpy.einsum('pi,pi->p', aob, out.conj())\n out = _dot_ao_dm(mol, aob, dm, non0tab, shls_slice, ao_loc, out=out)\n rhoab = numpy.einsum('pi,pi->p', aoa, out.conj())\n rhobb = numpy.einsum('pi,pi->p', aob.real, out.real)\n rhobb+= numpy.einsum('pi,pi->p', aob.imag, out.imag)\n return rhoaa, rhoab, rhoba, rhobb\n\ndef _rho2x2_to_rho_m(rho2x2):\n raa, rab, rba, rbb = rho2x2\n rho = (raa + rbb).real\n mx = rab.real + rba.real\n my = rba.imag - rab.imag\n mz = raa - rbb\n m = numpy.vstack((mx, my, mz))\n return rho, m\n\n#TODO: \\nabla^2 rho and tau = 1/2 (\\nabla f)^2\ndef eval_rho(mol, ao, dm, non0tab=None, xctype='LDA', hermi=0, verbose=None):\n aoa, aob = ao\n ngrids, nao = aoa.shape[-2:]\n xctype = xctype.upper()\n\n if non0tab is None:\n non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),\n dtype=numpy.uint8)\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_2c()\n\n if xctype == 'LDA':\n tmp = _dm2c_to_rho2x2(mol, ao, dm, non0tab, shls_slice, ao_loc)\n rho, m = _rho2x2_to_rho_m(tmp)\n elif xctype == 'GGA':\n raise NotImplementedError\n else: # meta-GGA\n raise NotImplementedError\n return rho, m\n\ndef _vxc2x2_to_mat(mol, ao, weight, rho, vrho, non0tab, shls_slice, ao_loc):\n aoa, aob = 
ao\n r, m = rho\n vr, vm = vrho.T\n aow = numpy.empty_like(aoa)\n# aow = numpy.einsum('pi,p->pi', aoa, weight*vr, out=aow)\n# mat = _dot_ao_ao(mol, aoa, aow, non0tab, shls_slice, ao_loc)\n# aow = numpy.einsum('pi,p->pi', aob, weight*vr, out=aow)\n# mat+= _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n#\n# s = lib.norm(m, axis=0)\n# ws = vm * weight / (s+1e-300)\n# aow = numpy.einsum('pi,p->pi', aoa, ws*m[0], out=aow) # Mx\n# tmp = _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n# mat+= tmp + tmp.T.conj()\n# aow = numpy.einsum('pi,p->pi', aoa, ws*m[1], out=aow) # My\n# tmp = _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n# mat+= (tmp - tmp.T.conj()) * 1j\n# aow = numpy.einsum('pi,p->pi', aoa, ws*m[2], out=aow) # Mz\n# mat+= _dot_ao_ao(mol, aoa, aow, non0tab, shls_slice, ao_loc)\n# aow = numpy.einsum('pi,p->pi', aob, ws*m[2], out=aow)\n# mat-= _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n\n s = lib.norm(m, axis=0)\n ws = vm * weight / (s+1e-300)\n aow = numpy.einsum('pi,p->pi', aoa, ws*m[0], out=aow) # Mx\n tmp = _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n mat = tmp + tmp.T.conj()\n aow = numpy.einsum('pi,p->pi', aoa, ws*m[1], out=aow) # My\n tmp = _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n mat+= (tmp - tmp.T.conj()) * 1j\n aow = numpy.einsum('pi,p->pi', aoa, weight*vr, out=aow)\n aow+= numpy.einsum('pi,p->pi', aoa, ws*m[2]) # Mz\n mat+= _dot_ao_ao(mol, aoa, aow, non0tab, shls_slice, ao_loc)\n aow = numpy.einsum('pi,p->pi', aob, weight*vr, out=aow)\n aow-= numpy.einsum('pi,p->pi', aob, ws*m[2]) # Mz\n mat+= _dot_ao_ao(mol, aob, aow, non0tab, shls_slice, ao_loc)\n return mat\n\ndef eval_mat(mol, ao, weight, rho, vxc,\n non0tab=None, xctype='LDA', verbose=None):\n aoa, aob = ao\n xctype = xctype.upper()\n ngrids, nao = aoa.shape[-2:]\n\n if non0tab is None:\n non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),\n dtype=numpy.uint8)\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_2c()\n if xctype == 'LDA':\n vrho = vxc[0]\n mat = _vxc2x2_to_mat(mol, ao, weight, rho, vrho, non0tab, shls_slice, ao_loc)\n else:\n raise NotImplementedError\n return mat\n\ndef r_vxc(ni, mol, grids, xc_code, dms, spin=0, relativity=0, hermi=1,\n max_memory=2000, verbose=None):\n xctype = ni._xc_type(xc_code)\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_2c()\n n2c = ao_loc[-1]\n\n make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)\n with_s = (nao == n2c*2) # 4C DM\n\n nelec = numpy.zeros(nset)\n excsum = numpy.zeros(nset)\n matLL = numpy.zeros((nset,n2c,n2c), dtype=numpy.complex128)\n matSS = numpy.zeros((nset,n2c,n2c), dtype=numpy.complex128)\n if xctype == 'LDA':\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, 0, with_s, max_memory):\n for idm in range(nset):\n rho = make_rho(idm, ao, mask, xctype)\n exc, vxc = ni.eval_xc(xc_code, rho, 1, relativity, 1,\n verbose=verbose)[:2]\n vrho = vxc[0]\n den = rho[0] * weight\n nelec[idm] += den.sum()\n excsum[idm] += (den*exc).sum()\n\n matLL[idm] += _vxc2x2_to_mat(mol, ao[:2], weight, rho, vrho,\n mask, shls_slice, ao_loc)\n if with_s:\n matSS[idm] += _vxc2x2_to_mat(mol, ao[2:], weight, rho, vrho,\n mask, shls_slice, ao_loc)\n rho = m = exc = vxc = vrho = None\n elif xctype == 'GGA':\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n if with_s:\n c1 = .5 / lib.param.LIGHT_SPEED\n vmat = numpy.zeros((nset,nao,nao), dtype=numpy.complex128)\n for idm in range(nset):\n vmat[idm,:n2c,:n2c] = matLL[idm]\n vmat[idm,n2c:,n2c:] = matSS[idm] * c1**2\n 
else:\n vmat = matLL\n\n if nset == 1:\n nelec = nelec[0]\n excsum = excsum[0]\n return nelec, excsum, vmat.reshape(dms.shape)\n\n\ndef get_rho(ni, mol, dm, grids, max_memory=2000):\n make_rho, nset, nao = ni._gen_rho_evaluator(mol, dm, hermi=1)\n n2c = mol.nao_2c()\n with_s = (nao == n2c*2) # 4C DM\n rho = numpy.empty(grids.weights.size)\n p1 = 0\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, 0, with_s, max_memory):\n p0, p1 = p1, p1 + weight.size\n rho[p0:p1] = make_rho(0, ao, mask, 'LDA')[0]\n return rho\n\n\nclass RNumInt(numint.NumInt):\n\n r_vxc = nr_vxc = r_vxc\n get_rho = get_rho\n\n def eval_ao(self, mol, coords, deriv=0, with_s=True, shls_slice=None,\n non0tab=None, out=None, verbose=None):\n return eval_ao(mol, coords, deriv, with_s, shls_slice, non0tab, out, verbose)\n\n def eval_rho2(self, mol, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',\n verbose=None):\n raise NotImplementedError\n\n @lib.with_doc(eval_rho.__doc__)\n def eval_rho(self, mol, ao, dm, non0tab=None, xctype='LDA', verbose=None):\n return eval_rho(mol, ao, dm, non0tab, xctype, verbose)\n\n def block_loop(self, mol, grids, nao, deriv=0, with_s=False, max_memory=2000,\n non0tab=None, blksize=None, buf=None):\n '''Define this macro to loop over grids by blocks.\n '''\n if grids.coords is None:\n grids.build(with_non0tab=True)\n ngrids = grids.weights.size\n comp = (deriv+1)*(deriv+2)*(deriv+3)//6\n# NOTE to index ni.non0tab, the blksize needs to be the integer multiplier of BLKSIZE\n if blksize is None:\n blksize = min(int(max_memory*1e6/((comp*4+4)*nao*16*BLKSIZE))*BLKSIZE, ngrids)\n blksize = max(blksize, BLKSIZE)\n if non0tab is None:\n non0tab = grids.non0tab\n if non0tab is None:\n non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),\n dtype=numpy.uint8)\n feval = 'GTOval_spinor_deriv%d' % deriv\n if buf is None:\n buf = numpy.empty((4,comp,blksize,nao), dtype=numpy.complex128)\n for ip0 in range(0, ngrids, blksize):\n ip1 = min(ngrids, ip0+blksize)\n coords = grids.coords[ip0:ip1]\n weight = grids.weights[ip0:ip1]\n non0 = non0tab[ip0//BLKSIZE:]\n ao = self.eval_ao(mol, coords, deriv=deriv, with_s=with_s,\n non0tab=non0, out=buf)\n yield ao, non0, weight, coords\n\n def _gen_rho_evaluator(self, mol, dms, hermi=1):\n dms = numpy.asarray(dms)\n nao = dms.shape[-1]\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n dms = dms.reshape(1,nao,nao)\n ndms = len(dms)\n n2c = mol.nao_2c()\n with_s = (nao == n2c*2) # 4C DM\n if with_s:\n c1 = .5 / lib.param.LIGHT_SPEED\n dmLL = dms[:,:n2c,:n2c].copy('C')\n dmSS = dms[:,n2c:,n2c:] * c1**2\n def make_rho(idm, ao, non0tab, xctype):\n rho , m = self.eval_rho(mol, ao[:2], dmLL[idm], non0tab, xctype)\n rhoS, mS = self.eval_rho(mol, ao[2:], dmSS[idm], non0tab, xctype)\n rho += rhoS\n # M = |\\beta\\Sigma|\n m[0] -= mS[0]\n m[1] -= mS[1]\n m[2] -= mS[2]\n return rho, m\n else:\n def make_rho(idm, ao, non0tab, xctype):\n return self.eval_rho(mol, ao, dms[idm], non0tab, xctype)\n return make_rho, ndms, nao\n\n def eval_xc(self, xc_code, rho, spin=1, relativity=0, deriv=1, verbose=None):\n # JTCC, 2, 257\n r, m = rho[:2]\n s = lib.norm(m, axis=0)\n rhou = (r + s) * .5\n rhod = (r - s) * .5\n rho = (rhou, rhod)\n xc = self.libxc.eval_xc(xc_code, rho, spin, relativity, deriv, verbose)\n exc, vxc = xc[:2]\n vrho = vxc[0]\n vr, vm = (vrho[:,0]+vrho[:,1])*.5, (vrho[:,0]-vrho[:,1])*.5\n vrho[:,0] = vr\n vrho[:,1] = vm\n return xc\n_RNumInt = RNumInt\n\n\nif __name__ == '__main__':\n import time\n from pyscf import gto\n from pyscf import dft\n 
from pyscf.dft import dks\n\n mol = gto.M(\n atom = [\n [\"O\" , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)] ],\n basis = '6311g*',)\n mf = dks.UKS(mol)\n mf.grids.atom_grid = {\"H\": (30, 194), \"O\": (30, 194),}\n mf.grids.prune = None\n mf.grids.build()\n dm = mf.get_init_guess(key='minao')\n\n print(time.clock())\n res = mf._numint.r_vxc(mol, mf.grids, mf.xc, dm, spin=0)\n print(res[1] - 0)\n print(time.clock())\n" ]
[ [ "numpy.random.random", "numpy.allclose", "numpy.random.seed", "numpy.einsum", "numpy.arange", "numpy.eye", "numpy.linalg.norm", "numpy.array", "numpy.zeros", "numpy.trace" ], [ "numpy.einsum", "numpy.asarray", "numpy.empty_like", "numpy.ndarray", "numpy.empty", "numpy.ones", "numpy.zeros", "numpy.vstack" ] ]
zouwenjiao/DSCI_522_Group304
[ "fe03bcff23cced5b9934e959355e3bb4e4af007d" ]
[ "src/add_subgroup_info.py" ]
[ "# author: Group 304 (Anny Chih)\n# date: 2020-02-04\n\n\"\"\"This script takes in a cleaned datafile and \nadds a column of values indicating if the school has BOTH Aboriginal and Non Aboriginal Students\n\nUsage: add_subgroup_info.py --clean_data=<clean_data> --new_data=<new_data> \n\nOptions:\n--clean_data=<clean_data> local path and file name of the clean datafile\n--new_data=<new_data> local path and file name of the output dataset that contains only schools with both Aboriginal and Non Aboriginal students\n\"\"\"\n# Example of how to run this script:\n# python src/add_subgroup_info.py --clean_data=\"data/clean_data.csv\" --new_data=\"data/new_clean_data.csv\"\n\nimport os\nimport pandas as pd\n\nfrom docopt import docopt\nopt = docopt(__doc__)\n\n# Tests that the file path exists\ndef check_path():\n if (os.path.exists(opt[\"--clean_data\"])):\n pass\n else:\n raise ValueError('The clean_data file cannot be found. Please check the path provided.')\n \ncheck_path()\n\ndef main(clean_data, new_data):\n \n # Reads in data\n data = pd.read_csv(clean_data)\n \n # Filters data to create a filter which will be used on the data\n filtered_data = data[data.year_start == 2018]\n filtered_data = filtered_data[filtered_data['sub_population'].isin(['ABORIGINAL', 'NON ABORIGINAL'])]\n filtered_data_num = filtered_data[filtered_data.fsa_skill_code == 'Numeracy']\n \n # Fills a list with the school numbers of schools with both Aboriginal and Non Aboriginal students\n num_filter = []\n for i in data.school_number.unique():\n if filtered_data_num[filtered_data_num.school_number == i].count()['score'] > 2:\n num_filter.append(i)\n \n # Creates an empty dataframe to store schools with both Aboriginal and Non Aboriginal students\n has_both = pd.DataFrame()\n \n # Adds the school numbers to the dataframe of schools with both Aboriginal and Non Aboriginal students\n has_both['school_number'] = num_filter\n has_both['has_both'] = 1\n \n # Adds the information from the 'has_both' table to the main dataset\n result = data.merge(has_both, how = 'left')\n \n # Fills 'has_both' with 0 if the school does not have both Aboriginal and Non Aboriginal students\n result[['has_both']] = result[['has_both']].fillna(value=0)\n \n # Saves final cleaned data file to specified local filepath\n result.to_csv(new_data)\n \nif __name__ == \"__main__\":\n main(opt[\"--clean_data\"], opt[\"--new_data\"])\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
alphadadajuju/online_action_two_stream
[ "b6d717f89e5c36b3c7929b651b30fb1125fc5d42" ]
[ "train-ucf24.py" ]
[ "\n\"\"\" Adapted from:\n @longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch\n @rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn\n Which was adopated by: Ellis Brown, Max deGroot\n https://github.com/amdegroot/ssd.pytorch\n\n Further:\n Updated by Gurkirt Singh for ucf101-24 dataset\n Licensed under The MIT License [see LICENSE for details]\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport argparse\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nfrom data import v2, UCF24Detection, AnnotationTransform, detection_collate, CLASSES, BaseTransform\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport numpy as np\nimport time\nfrom utils.evaluation import evaluate_detections\nfrom layers.box_utils import decode, nms\nfrom utils import AverageMeter\nfrom torch.optim.lr_scheduler import MultiStepLR\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')\nparser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')\nparser.add_argument('--dataset', default='ucf24', help='pretrained base model')\nparser.add_argument('--ssd_dim', default=300, type=int, help='Input Size for SSD') # only support 300 now\nparser.add_argument('--input_type', default='rgb', type=str, help='INput tyep default rgb options are [rgb,brox,fastOF]')\nparser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')\nparser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str, help='Resume from checkpoint')\nparser.add_argument('--num_workers', default=4, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--max_iter', default=150000, type=int, help='Number of training iterations')\nparser.add_argument('--man_seed', default=123, type=int, help='manualseed for reproduction')\nparser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--ngpu', default=1, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--stepvalues', default='30000,60000,100000', type=str, help='iter numbers where learing rate to be dropped')\nparser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')\nparser.add_argument('--vis_port', default=8097, type=int, help='Port for Visdom Server')\nparser.add_argument('--data_root', default='/mnt/mars-fast/datasets/', help='Location of VOC root directory')\nparser.add_argument('--save_root', default='/mnt/mars-gamma/datasets/', help='Location to save checkpoint models')\nparser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold')\nparser.add_argument('--conf_thresh', default=0.01, type=float, help='Confidence threshold 
for evaluation')\nparser.add_argument('--nms_thresh', default=0.45, type=float, help='NMS threshold')\nparser.add_argument('--topk', default=50, type=int, help='topk for evaluation')\n\n### Newly added arguments\nparser.add_argument('--eval_step', default=10000, type=int, help='validation and save checkpoints')\nparser.add_argument('--start_iter', default=0, type=int, help='Resume training at this iter')\n\n## Parse arguments\nargs = parser.parse_args()\n## set random seeds\nnp.random.seed(args.man_seed)\ntorch.manual_seed(args.man_seed)\nif args.cuda:\n torch.cuda.manual_seed_all(args.man_seed)\n\n\ntorch.set_default_tensor_type('torch.FloatTensor')\n\n\ndef main():\n args.cfg = v2\n args.train_sets = 'train'\n args.means = (104, 117, 123)\n num_classes = len(CLASSES) + 1\n args.num_classes = num_classes\n args.stepvalues = [int(val) for val in args.stepvalues.split(',')]\n args.loss_reset_step = 30\n args.eval_step = 10000\n args.print_step = 10\n\n ## Define the experiment Name will used to same directory and ENV for visdom\n args.exp_name = 'CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(args.dataset,\n args.input_type, args.batch_size, args.basenet[:-14], int(args.lr*100000))\n\n args.save_root += args.dataset+'/'\n args.save_root = args.save_root+'cache/'+args.exp_name+'/'\n\n if not os.path.isdir(args.save_root):\n os.makedirs(args.save_root)\n\n net = build_ssd(300, args.num_classes)\n\n if args.cuda:\n net = net.cuda()\n\n def xavier(param):\n init.xavier_uniform(param)\n\n def weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\n print('Initializing weights for extra layers and HEADs...')\n # initialize newly added layers' weights with xavier method\n net.extras.apply(weights_init)\n net.loc.apply(weights_init)\n net.conf.apply(weights_init)\n\n if args.input_type == 'fastOF':\n print('Download pretrained brox flow trained model weights and place them at:::=> ',args.data_root + 'ucf24/train_data/brox_wieghts.pth')\n pretrained_weights = args.data_root + 'ucf24/train_data/brox_wieghts.pth'\n print('Loading base network...')\n net.load_state_dict(torch.load(pretrained_weights))\n else:\n vgg_weights = torch.load(args.data_root +'ucf24/train_data/' + args.basenet)\n print('Loading base network...')\n net.vgg.load_state_dict(vgg_weights)\n\n args.data_root += args.dataset + '/'\n\n parameter_dict = dict(net.named_parameters()) # Get parmeter of network in dictionary format wtih name being key\n params = []\n\n #Set different learning rate to bias layers and set their weight_decay to 0\n for name, param in parameter_dict.items():\n if name.find('bias') > -1:\n print(name, 'layer parameters will be trained @ {}'.format(args.lr*2))\n params += [{'params': [param], 'lr': args.lr*2, 'weight_decay': 0}]\n else:\n print(name, 'layer parameters will be trained @ {}'.format(args.lr))\n params += [{'params':[param], 'lr': args.lr, 'weight_decay':args.weight_decay}]\n\n optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(args.num_classes, 0.5, True, 0, True, 3, 0.5, False, args.cuda)\n scheduler = MultiStepLR(optimizer, milestones=args.stepvalues, gamma=args.gamma)\n train(args, net, optimizer, criterion, scheduler)\n\n\ndef train(args, net, optimizer, criterion, scheduler):\n log_file = open(args.save_root+\"training.log\", \"w\", 1)\n log_file.write(args.exp_name+'\\n')\n for arg in vars(args):\n print(arg, getattr(args, arg))\n log_file.write(str(arg)+': '+str(getattr(args, 
arg))+'\\n')\n log_file.write(str(net))\n net.train()\n\n # loss counters\n batch_time = AverageMeter()\n losses = AverageMeter()\n loc_losses = AverageMeter()\n cls_losses = AverageMeter()\n\n print('Loading Dataset...')\n train_dataset = UCF24Detection(args.data_root, args.train_sets, SSDAugmentation(args.ssd_dim, args.means),\n AnnotationTransform(), input_type=args.input_type)\n val_dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, args.means),\n AnnotationTransform(), input_type=args.input_type,\n full_test=False)\n epoch_size = len(train_dataset) // args.batch_size\n print('Training SSD on', train_dataset.name)\n\n if args.visdom:\n\n import visdom\n\n ### Modified: in order to use colab to visualize \n viz = visdom.Visdom(port='6006')\n #viz.port = args.vis_port\n viz.env = args.exp_name\n # initialize visdom loss plot\n lot = viz.line(\n X=torch.zeros((1,)).numpy(),\n Y=torch.zeros((1, 6)).numpy(),\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title='Current SSD Training Loss',\n legend=['REG', 'CLS', 'AVG', 'S-REG', ' S-CLS', ' S-AVG']\n )\n )\n # initialize visdom meanAP and class APs plot\n legends = ['meanAP']\n for cls in CLASSES:\n legends.append(cls)\n val_lot = viz.line(\n X=torch.zeros((1,)).numpy(),\n Y=torch.zeros((1,args.num_classes)).numpy(),\n opts=dict(\n xlabel='Iteration',\n ylabel='Mean AP',\n title='Current SSD Validation mean AP',\n legend=legends\n )\n )\n\n\n batch_iterator = None\n train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate, pin_memory=True)\n val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,\n shuffle=False, collate_fn=detection_collate, pin_memory=True)\n itr_count = 0\n torch.cuda.synchronize()\n t0 = time.perf_counter()\n iteration = 0\n while iteration <= args.max_iter:\n for i, (images, targets, img_indexs) in enumerate(train_data_loader):\n\n if iteration > args.max_iter:\n break\n iteration += 1\n if args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(anno.cuda(), volatile=True) for anno in targets]\n else:\n images = Variable(images)\n targets = [Variable(anno, volatile=True) for anno in targets]\n # forward\n out = net(images)\n # backprop\n optimizer.zero_grad()\n\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n scheduler.step()\n loc_loss = loss_l.data[0]\n conf_loss = loss_c.data[0]\n # print('Loss data type ',type(loc_loss))\n loc_losses.update(loc_loss)\n cls_losses.update(conf_loss)\n losses.update((loc_loss + conf_loss)/2.0)\n\n\n if iteration % args.print_step == 0 and iteration>0:\n if args.visdom:\n losses_list = [loc_losses.val, cls_losses.val, losses.val, loc_losses.avg, cls_losses.avg, losses.avg]\n viz.line(X=torch.ones((1, 6)).numpy() * iteration,\n Y=torch.from_numpy(np.asarray(losses_list)).unsqueeze(0).numpy(),\n win=lot,\n update='append')\n\n\n torch.cuda.synchronize()\n t1 = time.perf_counter()\n batch_time.update(t1 - t0)\n\n print_line = 'Itration {:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \\\n 'average-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f})'.format(\n iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val,\n cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg)\n\n torch.cuda.synchronize()\n t0 = time.perf_counter()\n log_file.write(print_line+'\\n')\n print(print_line)\n\n # if args.visdom and 
args.send_images_to_visdom:\n            #     random_batch_index = np.random.randint(images.size(0))\n            #     viz.image(images.data[random_batch_index].cpu().numpy())\n                itr_count += 1\n\n                if itr_count % args.loss_reset_step == 0 and itr_count > 0:\n                    loc_losses.reset()\n                    cls_losses.reset()\n                    losses.reset()\n                    batch_time.reset()\n                    print('Reset accumulators of ', args.exp_name,' at', itr_count*args.print_step)\n                    itr_count = 0\n\n            if (iteration % args.eval_step == 0 or iteration == 5000) and iteration>0:\n                torch.cuda.synchronize()\n                tvs = time.perf_counter()\n                print('Saving state, iter:', iteration)\n                torch.save(net.state_dict(), args.save_root+'ssd300_ucf24_' +\n                           repr(iteration) + '.pth')\n\n                net.eval() # switch net to evaluation mode\n                mAP, ap_all, ap_strs = validate(args, net, val_data_loader, val_dataset, iteration, iou_thresh=args.iou_thresh)\n\n                for ap_str in ap_strs:\n                    print(ap_str)\n                    log_file.write(ap_str+'\\n')\n                ptr_str = '\\nMEANAP:::=>'+str(mAP)+'\\n'\n                print(ptr_str)\n                log_file.write(ptr_str)\n\n                if args.visdom:\n                    aps = [mAP]\n                    for ap in ap_all:\n                        aps.append(ap)\n                    viz.line(\n                        X=torch.ones((1, args.num_classes)).numpy() * iteration,\n                        Y=torch.from_numpy(np.asarray(aps)).unsqueeze(0).numpy(),\n                        win=val_lot,\n                        update='append'\n                            )\n                net.train() # Switch net back to training mode\n                torch.cuda.synchronize()\n                t0 = time.perf_counter()\n                prt_str = '\\nValidation TIME::: {:0.3f}\\n\\n'.format(t0-tvs)\n                print(prt_str)\n                log_file.write(prt_str)\n\n    log_file.close()\n\n\ndef validate(args, net, val_data_loader, val_dataset, iteration_num, iou_thresh=0.5):\n    \"\"\"Test a SSD network on an image database.\"\"\"\n    print('Validating at ', iteration_num)\n    num_images = len(val_dataset)\n    num_classes = args.num_classes\n\n    det_boxes = [[] for _ in range(len(CLASSES))]\n    gt_boxes = []\n    print_time = True\n    batch_iterator = None\n    val_step = 100\n    count = 0\n    torch.cuda.synchronize()\n    ts = time.perf_counter()\n\n    for val_itr in range(len(val_data_loader)):\n        if not batch_iterator:\n            batch_iterator = iter(val_data_loader)\n\n        torch.cuda.synchronize()\n        t1 = time.perf_counter()\n\n        images, targets, img_indexs = next(batch_iterator)\n        batch_size = images.size(0)\n        height, width = images.size(2), images.size(3)\n\n        if args.cuda:\n            images = Variable(images.cuda(), volatile=True)\n        output = net(images)\n\n        loc_data = output[0]\n        conf_preds = output[1]\n        prior_data = output[2]\n\n        if print_time and val_itr%val_step == 0:\n            torch.cuda.synchronize()\n            tf = time.perf_counter()\n            print('Forward Time {:0.3f}'.format(tf-t1))\n        for b in range(batch_size):\n            gt = targets[b].numpy()\n            gt[:,0] *= width\n            gt[:,2] *= width\n            gt[:,1] *= height\n            gt[:,3] *= height\n            gt_boxes.append(gt)\n            decoded_boxes = decode(loc_data[b].data, prior_data.data, args.cfg['variance']).clone()\n            conf_scores = net.softmax(conf_preds[b]).data.clone()\n\n            for cl_ind in range(1, num_classes):\n                scores = conf_scores[:, cl_ind].squeeze()\n                c_mask = scores.gt(args.conf_thresh)  # greater than minimum threshold\n                scores = scores[c_mask].squeeze()\n                # print('scores size',scores.size())\n                if scores.dim() == 0:\n                    # print(len(''), ' dim ==0 ')\n                    det_boxes[cl_ind - 1].append(np.asarray([]))\n                    continue\n                boxes = decoded_boxes.clone()\n                l_mask = c_mask.unsqueeze(1).expand_as(boxes)\n                boxes = boxes[l_mask].view(-1, 4)\n                # idx of highest scoring and non-overlapping boxes per class\n                ids, counts = nms(boxes, scores, args.nms_thresh, args.topk)  # idsn - ids after nms\n                scores = scores[ids[:counts]].cpu().numpy()\n                boxes = boxes[ids[:counts]].cpu().numpy()\n                # print('boxes 
shape',boxes.shape)\n                boxes[:,0] *= width\n                boxes[:,2] *= width\n                boxes[:,1] *= height\n                boxes[:,3] *= height\n\n                for ik in range(boxes.shape[0]):\n                    boxes[ik, 0] = max(0, boxes[ik, 0])\n                    boxes[ik, 2] = min(width, boxes[ik, 2])\n                    boxes[ik, 1] = max(0, boxes[ik, 1])\n                    boxes[ik, 3] = min(height, boxes[ik, 3])\n\n                cls_dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=True)\n\n                det_boxes[cl_ind-1].append(cls_dets)\n            count += 1\n        if val_itr%val_step == 0:\n            torch.cuda.synchronize()\n            te = time.perf_counter()\n            print('im_detect: {:d}/{:d} time taken {:0.3f}'.format(count, num_images, te-ts))\n            torch.cuda.synchronize()\n            ts = time.perf_counter()\n        if print_time and val_itr%val_step == 0:\n            torch.cuda.synchronize()\n            te = time.perf_counter()\n            print('NMS stuff Time {:0.3f}'.format(te - tf))\n    print('Evaluating detections for iteration number ', iteration_num)\n    return evaluate_detections(gt_boxes, det_boxes, CLASSES, iou_thresh=iou_thresh)\n\n\nif __name__ == '__main__':\n    main()\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.set_default_tensor_type", "torch.cuda.synchronize", "numpy.hstack", "torch.ones", "numpy.random.seed", "torch.load", "torch.zeros", "torch.manual_seed", "numpy.asarray", "torch.utils.data.DataLoader", "torch.cuda.manual_seed_all", "torch.optim.SGD", "torch.nn.init.xavier_uniform", "torch.autograd.Variable" ] ]
mohamedameen93/An-Autonomous-Vehicle-System-For-Udacity-s-Carla
[ "a7ecd632c5f91f2d6037cbcb0cabff94e6e74869" ]
[ "ros/src/tl_detector/light_classification/tl_classifier.py" ]
[ "import tensorflow as tf\nfrom styx_msgs.msg import TrafficLight\n\n\nPROTOBUG_GRAPH_FILE = 'light_classification/retrained_SSD/frozen_inference_graph.pb'\n\n\nclass TLClassifier(object):\n def __init__(self):\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PROTOBUG_GRAPH_FILE, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.sess = tf.Session(graph=graph)\n\n with self.sess.as_default():\n self.image_ = graph.get_tensor_by_name('image_tensor:0')\n self.boxes_ = graph.get_tensor_by_name('detection_boxes:0')\n self.scores_ = graph.get_tensor_by_name('detection_scores:0')\n self.classes_ = graph.get_tensor_by_name('detection_classes:0')\n\n self.category_index = {\n 1: TrafficLight.GREEN,\n 2: TrafficLight.RED,\n 3: TrafficLight.YELLOW,\n 4: TrafficLight.UNKNOWN,\n }\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n with self.sess.as_default():\n tensors_ = [self.boxes_, self.scores_, self.classes_]\n feed_dict = {self.image_: [image]}\n boxes, scores, classes = self.sess.run(tensors_, feed_dict)\n\n most_probable_class = int(classes[0][0])\n return self.category_index[most_probable_class]\n" ]
[ [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "tensorflow.Session", "tensorflow.GraphDef" ] ]
haokui/octobus
[ "66ba4aaf24cc43cee0f1fec226df09a451b513c1" ]
[ "octobus/base.py" ]
[ "\"\"\"\ncore APIs of the data models\n\"\"\"\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\n\n\nclass FeatureSet:\n \"\"\"\n currently, just a named list of feature names\n \"\"\"\n def __init__(self, feature_names=None, name=None):\n self.feature_names = feature_names\n self.name = name\n\n def __getitem__(self, key):\n return self.feature_names[key]\n\n\nclass DataSource:\n def __init__(self, dataframe):\n self.data = dataframe\n\n @property\n def entities(self):\n return self.data.index\n\n @property\n def features(self):\n return self.data.columns\n\n def __getitem__(self, key):\n return DataSource(self.data[key])\n\n @property\n def loc(self):\n return self.data.loc\n\n\nclass DataStore:\n def __init__(self):\n self.data_store = {}\n\n def register_features(self, features):\n # check for valid features\n print('ingested features: {}'.format(len(features)))\n\n def register_entities(self, entities):\n # check for valid entities\n print('ingested entities: {}'.format(len(entities)))\n\n def register_data(self, name, data):\n if name in self.data_store:\n raise '{} is already registered'.format(name)\n print('ingested data source name: {}'.format(name))\n self.register_features(data.features)\n self.register_entities(data.entities)\n self.data_store[name] = data\n\n def ingest(self, name, dataframe, entity_idx=None):\n _data = dataframe\n if entity_idx:\n # todo: do not change the original dataframe ???\n _data.set_index(entity_idx, inplace=True)\n self.register_data(name=name,\n data=DataSource(_data))\n\n def __getitem__(self, key):\n return self.data_store[key]\n\n\nclass FeatureStore:\n def __init__(self, data_source, features=None, name='unnamed'):\n if features is None:\n features = data_source.features\n self.features = features\n self.data = data_source[features]\n self.name = name\n # TODO: add timestamp for feature context sorting\n\n @property\n def entities(self):\n return self.data.entities\n\n def __getitem__(self, key):\n return self.data[key]\n\n @property\n def loc(self):\n return self.data.loc\n\n def __repr__(self):\n return '{} [feature store]'.format(self.name)\n\n\nclass FeatureContext:\n \"\"\"\n store and manage the feature context in a first-in-last-out manner\n \"\"\"\n def __init__(self):\n # each feature is mapped to a list of feature stores\n # in a first-in-last-out manner\n self.feature_context = defaultdict(list)\n\n def append(self, feature_store, features=None):\n if features is None:\n features = feature_store.features\n for feature in features:\n self.feature_context[feature].append(feature_store)\n\n def pop(self, feature_store, features=None):\n if features is None:\n features = feature_store.features\n for feature in features:\n if self.feature_context[feature][-1] is feature_store:\n self.feature_context[feature].pop()\n\n def __getitem__(self, feature):\n feature_stores = self.feature_context[feature]\n if len(feature_stores) == 0:\n return None\n else:\n return self.feature_context[feature][-1]\n\n @property\n def features(self):\n return [feature_name for feature_name, feature_stores in self.feature_context.items()\n if len(feature_stores) > 0]\n\n\nclass SampleStore:\n def __init__(self, samples=None):\n self.samples = set()\n if samples is not None:\n self.add_samples(samples)\n self.feature_context = FeatureContext()\n\n def add_samples(self, samples):\n self.samples.update(samples)\n\n def filter(self, condition):\n filter_samples = condition[condition].index\n 
self.samples.difference_update(filter_samples)\n\n    def get_data(self, features=None):\n        if features is None:\n            features = self.feature_context.features\n        collected_stores = defaultdict(list)\n        for feature in features:\n            fs = self.feature_context[feature]\n            if fs is not None:\n                collected_stores[fs].append(feature)\n        data = [fs.loc[list(self.samples), features] for fs, features in collected_stores.items()]\n        return pd.concat(data, axis=1)\n\n    def add_features(self, feature_store, features=None):\n        self.feature_context.append(feature_store=feature_store,\n                                    features=features)\n\n    def remove_features(self, feature_store, features=None):\n        self.feature_context.pop(feature_store=feature_store,\n                                 features=features)\n\n    def __getitem__(self, feature):\n        feature_store = self.feature_context[feature]\n        if feature_store is None:\n            raise KeyError('No such feature in this sample store: {}'.format(feature))\n\n        return feature_store.loc[list(self.samples), feature]\n\n\nclass DataSet:\n    def __init__(self, X, y=None):\n        self.samples = X.index\n        self.features = X.columns\n        self.X = X\n        self.y = None\n        if y is not None:\n            self.set_y(y)\n\n    def set_y(self, y):\n        assert y.index.equals(self.samples)\n        self.y = y.loc[self.samples]\n\n    def get_data(self, with_y=True):\n        if with_y:\n            if self.y is None:\n                y = pd.Series(np.nan, index=self.samples)\n            else:\n                y = self.y\n            return pd.concat([self.X, y], axis=1)\n        else:\n            return self.X\n\n\nclass FeatureManager:\n    def __init__(self, feature_store, sample_store, features=None, verbose=1):\n        self.feature_store = feature_store\n        self.sample_store = sample_store\n        self.verbose = verbose\n        if features is None:\n            features = feature_store.features\n        self.features = features\n\n    def __enter__(self):\n        self.sample_store.add_features(self.feature_store, features=self.features)\n        if self.verbose > 0:\n            print('>>> {} features are added with {}'.format(len(self.features), repr(self.feature_store)))\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.sample_store.remove_features(self.feature_store, features=self.features)\n        if self.verbose > 0:\n            print('<<< {} features are removed with {}'.format(len(self.features), repr(self.feature_store)))\n" ]
[ [ "pandas.concat", "pandas.Series" ] ]
rajvi-tiwari/Vision_for_trash_bot
[ "6ea82d0281d5ef1f6bbc6ad1bce07b43d74cff13" ]
[ "Object_detection_video.py" ]
[ "# This program uses a TensorFlow-trained classifier to perform object detection.\r\n# It loads the classifier and uses it to perform object detection on a video.\r\n# It draws boxes, scores, and labels around the objects of interest in each\r\n# frame of the video.\r\n\r\n## Some of the code is copied from Google's example at\r\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\r\n\r\n## and some is copied from Dat Tran's example at\r\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\r\n\r\n## but I changed it to make it more understandable to me.\r\n\r\n# Import packages\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport sys\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n# Import utilites\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 'inference_graph'\r\nVIDEO_NAME = 'test.mov'\r\n\r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\r\n\r\n# Path to video\r\nPATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 4\r\n\r\n# Load the label map.\r\n# Label maps map indices to category names, so that when our convolution\r\n# network predicts `5`, we know that this corresponds to `king`.\r\n# Here we use internal utility functions, but anything that returns a\r\n# dictionary mapping integers to appropriate string labels would be fine\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.Session(graph=detection_graph)\r\n\r\n# Define input and output tensors (i.e. 
data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n# Open video file\r\nvideo = cv2.VideoCapture(PATH_TO_VIDEO)\r\n\r\nwhile(video.isOpened()):\r\n\r\n    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\r\n    # i.e. a single-column array, where each item in the column has the pixel RGB value\r\n    ret, frame = video.read()\r\n    if not ret:\r\n        # Stop when the video ends or a frame fails to decode\r\n        break\r\n    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n    frame_expanded = np.expand_dims(frame_rgb, axis=0)\r\n\r\n    # Perform the actual detection by running the model with the image as input\r\n    (boxes, scores, classes, num) = sess.run(\r\n        [detection_boxes, detection_scores, detection_classes, num_detections],\r\n        feed_dict={image_tensor: frame_expanded})\r\n\r\n    # Draw the results of the detection (aka 'visualize the results')\r\n    vis_util.visualize_boxes_and_labels_on_image_array(\r\n        frame,\r\n        np.squeeze(boxes),\r\n        np.squeeze(classes).astype(np.int32),\r\n        np.squeeze(scores),\r\n        category_index,\r\n        use_normalized_coordinates=True,\r\n        line_thickness=8,\r\n        min_score_thresh=0.60)\r\n\r\n    # All the results have been drawn on the frame, so it's time to display it.\r\n    cv2.imshow('Object detector', frame)\r\n\r\n    # Press 'q' to quit\r\n    if cv2.waitKey(1) == ord('q'):\r\n        break\r\n\r\n# Clean up\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.Session", "tensorflow.GraphDef" ] ]
chahuja/aisle
[ "08d854f837767129eb454d32db8814d3b51a071e" ]
[ "src/data/transform.py" ]
[ "import os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport pickle as pkl\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport sklearn.cluster\nimport sklearn.mixture\nimport numpy as np\nimport pdb\n\nfrom dataUtils import DummyData\nfrom text import POStagging\nfrom common import HDF5\nfrom skeleton import Skeleton2D\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom pycasper import torchUtils\n\n\nclass TransformDict():\n '''\n Convert a Transform Class which accept dictionaries as inputs\n\n Args:\n transform: ``Transform`` object\n\n Example:\n >>> TranslateDict = TransformDict(Translate(1))\n >>> print(TranslateDict({'tensor': torch.zeros(3)}))\n >>> {'tensor': Tensor([1., 1., 1.])}\n '''\n def __init__(self, transform):\n self.transform = transform\n\n def __call__(self, batch, **kwargs):\n batch_new = {}\n for variable in batch:\n batch_new[variable] = self.transform(batch[variable], **kwargs)\n return batch_new\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '({})'.format(self.transform)\n return format_string\n\nclass Compose():\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> Compose([\n >>> ZNorm(['pose/data'], key='oliver'),\n >>> TransformDict(Translate(10))\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img, inv=False, **kwargs):\n transforms = self.transforms if not inv else self.transforms[::-1]\n\n for t in transforms:\n if isinstance(t, TransformDict):\n if t.transform.__call__.__code__.co_argcount == 2:\n img = t(img)\n elif t.transform.__call__.__code__.co_argcount == 3 and kwargs:\n img = t(img, inv=inv, **kwargs)\n elif t.transform.__call__.__code__.co_argcount == 3:\n img = t(img, inv=inv)\n else:\n assert 0, 'all transforms must have only one or two arguments'\n else:\n if t.__call__.__code__.co_argcount == 2:\n img = t(img)\n elif t.__call__.__code__.co_argcount == 3 and kwargs:\n img = t(img, inv=inv, **kwargs)\n elif t.__call__.__code__.co_argcount == 3:\n img = t(img, inv=inv)\n else:\n assert 0, 'all transforms must have only one or two arguments'\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ZNorm():\n '''\n Class to calculate ZNorm on the complete data\n\n Arguments:\n variable_list (list of str, optional):\n savepath (str): path to the folder where the mean and variances are stored\n key (str): name of the file\n data (DataLoader): a data loader which iterates through all of the data\n num_dims (int, optional): number of dimensions from the left along\n which mean/var is calculated. For example, if ``2``, an input\n of shape ``(10, 20, 30, 40)`` would be reduced to a mean\n of shape ``(1, 1, 30, 40)``. 
(default: ``2``)\n\n Example::\n from data import ZNorm\n from torch.utils.data import DataLoader\n from data import DummyData\n\n variable_list = ['pose', 'audio']\n data = DataLoader(DummyData(variable_list=variable_list, random=True), batch_size=1000)\n pre = ZNorm(['audio', 'pose'], savepath='./preprocessing_temp', data=data)\n for batch in data:\n break\n mean = pre(batch)['pose'].mean()\n std = pre(batch)['pose'].std()\n print('Mean: {}, std: {} after Ztransform'.format(mean, std))\n '''\n def __init__(self, variable_list=[], savepath='./preprocessing/muvar', key='key', data=None, num_dims=2, verbose=True, relative2parent=0, pre=None):\n os.makedirs(savepath, exist_ok=True)\n self.variable_list = variable_list\n self.savepath = savepath\n self.key = '_'.join(key) if isinstance(key, list) else key\n self.data = data\n self.relative2parent = relative2parent\n self.pre = pre ## use a pre on the data before calculating mean var\n self.hdf5 = HDF5()\n\n self.variable_dict = {}\n\n\n for variable in variable_list:\n if relative2parent:\n path2file = Path(savepath)/'{}_relative2parent.h5'.format(self.key)\n else:\n path2file = Path(savepath)/(self.key+'.h5')\n if path2file.exists():\n muvar = self.loadfile(path2file, variable)\n if muvar is None:\n muvar = self.cal_muvar(path2file, variable, num_dims=num_dims)\n if verbose:\n print('Calculating Mean-Variance for {}'.format(variable))\n else:\n if verbose:\n print('Loading Mean-Variance for {}'.format(variable))\n else:\n muvar = self.cal_muvar(path2file, variable, num_dims=num_dims)\n if verbose:\n print('Calculating Mean-Variance for {}'.format(variable))\n self.variable_dict[variable] = muvar\n\n def loadfile(self, path2file, variable):\n if not self.hdf5.isDatasetInFile(path2file, variable):\n return None\n mu = torch.from_numpy(\n self.hdf5.load(\n path2file,\n self.hdf5.add_key(variable, ['mean'])\n )[0][()]\n ).to(torch.double)\n var = torch.from_numpy(\n self.hdf5.load(\n path2file,\n self.hdf5.add_key(variable, ['var'])\n )[0][()]\n ).to(torch.double)\n return [mu, var]\n\n def savefile(self, obj, path2file, variable):\n self.hdf5.append(path2file,\n self.hdf5.add_key(variable, ['mean']),\n obj[0])\n self.hdf5.append(path2file,\n self.hdf5.add_key(variable, ['var']),\n obj[1])\n\n '''\n Calculate the mean and variance of the dataset\n\n Arguments:\n path2file (str): path to the file where the mean and variances are stored\n variable (str): variable for which the mean and variance has to be calculated\n num_dims (int, optional): number of dimensions from the left along\n which mean/var is calculated. For example, if ``2``, an input\n of shape ``(10, 20, 30, 40)`` would be reduced to a mean\n of shape ``(1, 1, 30, 40)``. 
(default: ``2``)\n '''\n def cal_muvar(self, path2file, variable, num_dims=2):\n mean = 0\n energy = 0\n count = 0\n collate_fn = self.data.collate_fn\n ## sample thorugh the complete dataset irrespective of the sampler\n dataloader = torch.utils.data.DataLoader(self.data.dataset, batch_size=32, collate_fn=collate_fn)\n #self.data.sampler = torch.utils.data.SequentialSampler(self.data.dataset)\n for batch in tqdm(dataloader, desc='mu+var for {}'.format(variable)):\n if self.pre is not None:\n batch = self.pre(batch)[variable]\n else:\n batch = batch[variable]\n dims = list(range(num_dims))\n mean += batch.sum(dim=dims, keepdim=True)\n energy += (batch**2).sum(dim=dims, keepdim=True)\n count += np.prod(np.array(batch.shape)[dims])\n mean = mean/count\n energy = energy/count\n var = energy - mean**2\n\n muvar = [mean, var]\n self.savefile(muvar, path2file, variable)\n return muvar\n\n def znorm(self, x, muvar, eps=1e-8):\n mask_std = (muvar[1] >= 0).to(torch.double)\n std = (muvar[1]*mask_std)**0.5\n mask = (std == 0).to(torch.double)\n std = (mask * eps) + (1-mask)*std\n return (x - muvar[0])/std\n\n def inv_znorm(self, x, muvar):\n return x*(muvar[1]**0.5) + muvar[0]\n\n def __call__(self, batch, inv=False):\n batch_new = {}\n for variable in batch:\n if variable in self.variable_dict:\n if not inv:\n batch_new[variable] = self.znorm(batch[variable], self.variable_dict[variable])\n else:\n batch_new[variable] = self.inv_znorm(batch[variable], self.variable_dict[variable])\n else:\n batch_new[variable] = batch[variable]\n return batch_new\n\n def __repr__(self):\n return self.__class__.__name__ + '(variable_list={}, key={})'.format(self.variable_list, self.key)\n\nclass PoseStarts():\n '''\n Class to calculate pose start positions\n\n Arguments:\n savepath (str): path to the folder where the mean and variances are stored\n key (str): name of the file\n data (DataLoader): a data loader which iterates through all of the data\n num_dims (int, optional): number of dimensions from the left along\n which mean/var is calculated. For example, if ``2``, an input\n of shape ``(10, 20, 30, 40)`` would be reduced to a mean\n of shape ``(1, 1, 30, 40)``. 
(default: ``2``)\n\n '''\n def __init__(self, variable_list=[], savepath='./preprocessing/poseStarts', key='key', data=None, num_dims=2, verbose=True, pre=None):\n os.makedirs(savepath, exist_ok=True)\n self.savepath = savepath\n self.key = '_'.join(key) if isinstance(key, list) else key\n self.data = data\n self.pre = pre ## use a pre on the data before calculating mean var\n self.hdf5 = HDF5()\n\n self.variable_dict = {}\n\n variable = None\n for Key in variable_list:\n if 'pose' in Key:\n variable = Key\n break\n \n path2file = Path(savepath)/(self.key+'.h5')\n if path2file.exists():\n muvar = self.loadfile(path2file, variable)\n if muvar is None:\n muvar = self.cal_muvar(path2file, variable, num_dims=num_dims)\n if verbose:\n print('Calculating Mean-Variance for {}'.format(variable))\n else:\n if verbose:\n print('Loading Mean-Variance for {}'.format(variable))\n else:\n muvar = self.cal_muvar(path2file, variable, num_dims=num_dims)\n if verbose:\n print('Calculating Mean-Variance for {}'.format(variable))\n self.variable_dict[variable] = muvar\n\n def loadfile(self, path2file, variable):\n if not self.hdf5.isDatasetInFile(path2file, variable):\n return None\n mu = torch.from_numpy(\n self.hdf5.load(\n path2file,\n self.hdf5.add_key(variable, ['mean'])\n )[0][()]\n ).to(torch.double)\n var = torch.from_numpy(\n self.hdf5.load(\n path2file,\n self.hdf5.add_key(variable, ['var'])\n )[0][()]\n ).to(torch.double)\n return [mu, var]\n\n def savefile(self, obj, path2file, variable):\n self.hdf5.append(path2file,\n self.hdf5.add_key(variable, ['mean']),\n obj[0])\n self.hdf5.append(path2file,\n self.hdf5.add_key(variable, ['var']),\n obj[1])\n\n def get_vel(self, x):\n return x[:, 1:] - x[:, :-1]\n \n def get_acc(self, x):\n return self.get_vel(torch.abs(self.get_vel(x)))\n\n @property\n def jointMask(self):\n return [2, 3, 5, 6]\n \n '''\n Calculate the mean and variance of acceleration of the dataset\n\n Arguments:\n path2file (str): path to the file where the mean and variances are stored\n variable (str): variable for which the mean and variance has to be calculated\n num_dims (int, optional): number of dimensions from the left along\n which mean/var is calculated. For example, if ``2``, an input\n of shape ``(10, 20, 30, 40)`` would be reduced to a mean\n of shape ``(1, 1, 30, 40)``. 
(default: ``2``)\n '''\n def cal_muvar(self, path2file, variable, num_dims=2):\n mean = 0\n energy = 0\n count = 0\n collate_fn = self.data.collate_fn\n ## sample thorugh the complete dataset irrespective of the sampler\n dataloader = torch.utils.data.DataLoader(self.data.dataset, batch_size=32, collate_fn=collate_fn)\n #self.data.sampler = torch.utils.data.SequentialSampler(self.data.dataset)\n \n for batch in tqdm(dataloader, desc='mu+var for {}'.format(variable)):\n if self.pre is not None:\n batch = self.pre(batch)[variable]\n else:\n batch = batch[variable]\n batch = self.get_acc(batch)##[..., self.jointMask].sum(dim=-2, keepdim=True) ## get acceleration\n dims = list(range(num_dims))\n mean += batch.sum(dim=dims, keepdim=True)\n energy += (batch**2).sum(dim=dims, keepdim=True)\n count += np.prod(np.array(batch.shape)[dims])\n mean = mean/count\n energy = energy/count\n var = energy - mean**2\n\n muvar = [mean, var]\n self.savefile(muvar, path2file, variable)\n return muvar\n\n def get_startsC(self, starts):\n ## add 2 dimensions to account for missing ones because of accelaration\n startsC = torch.zeros(starts.shape[0], starts.shape[1]+2)\n for r in range(starts.shape[0]):\n c_ = -1\n for c in range(starts.shape[1]):\n if starts[r, c] == 1:\n length = c - c_\n startsC[r, c_+1:c+1] = torch.sin(np.pi*torch.linspace(0, 1-1/length, length)) \n c_ = c\n length = c + 1 - c_ \n startsC[r, c_+1:c+2] = torch.sin(np.pi*torch.linspace(0, 1-1/length, length))\n return startsC\n \n def forward(self, x, muvar, eps=1e-8):\n mask_std = (muvar[1] >= 0).to(torch.double)\n std = (muvar[1]*mask_std)**0.5\n mask = (std == 0).to(torch.double)\n std = (mask * eps) + (1-mask)*std\n starts = self.get_acc(x) > (muvar[0] + 2*std)\n # mask to get elbows and wrists\n starts = starts.view(starts.shape[0], starts.shape[1], 2, -1)[..., self.jointMask] \n starts = starts.view(starts.shape[0], starts.shape[1], -1)\n starts = starts.double().max(dim=-1).values\n startsC = self.get_startsC(starts)\n starts = torch.cat([torch.ones(starts.shape[0], 1).double(), starts,\n torch.ones(starts.shape[0], 1).double()], dim=-1)\n return starts, startsC\n \n def __call__(self, batch, inv=False):\n key = None\n for Key in batch.keys():\n if 'pose' in Key:\n key = Key\n break\n\n if not inv:\n batch['pose/starts'], batch['pose/startsC'] = self.forward(batch[key], self.variable_dict[key])\n\n return batch\n\n def __repr__(self):\n return self.__class__.__name__ + '(variable_list={}, key={})'.format(self.variable_list, self.key)\n\n\nclass KMeans():\n '''\n ### TODO: Does not work with Compose at the moment\n Class to calculate KMeans on the complete data\n\n Arguments:\n variable_list (list of str, optional):\n savepath (str): path to the folder where the mean and variances are stored\n key (str): name of the file\n data (DataLoader): a data loader which iterates through all of the data\n num_dims (int, optional): number of dimensions from the left along\n which mean/var is calculated. For example, if ``2``, an input\n of shape ``(10, 20, 30, 40)`` would be reduced to a mean\n of shape ``(1, 1, 30, 40)``. 
(default: ``2``)\n\n Example::\n from data import KMeans\n from torch.utils.data import DataLoader\n from data import DummyData\n\n variable_list = ['pose', 'audio']\n device = 'cuda'\n data = DataLoader(DummyData(variable_list=variable_list, random=True), batch_size=1000)\n kmeans = KMeans(savepath='./preprocessing_temp', data=data, device=device)\n for batch in data:\n break\n predictions = kmeans(batch['pose/data'])\n '''\n def __init__(self, variable_list = [], savepath='./preprocessing/kmeans', key='key', data=None, num_clusters=8, mask=[0, 7, 8, 9], feats=['pose', 'velocity'], verbose=True):\n os.makedirs(savepath, exist_ok=True)\n self.variable_list = variable_list\n self.variable = variable_list[0]\n self.savepath = savepath\n self.key = '_'.join(key) if isinstance(key, list) else key\n self.data = data\n self.num_clusters = num_clusters\n self.mask = mask\n self.remove_joints = RemoveJoints(self.mask)\n self.feats = feats\n\n # self.muvarpath = './preprocessing/muvar'\n # self.hdf5 = HDF5()\n # self.variable_dict = {}\n\n # for v in variable_list:\n # path2file = Path(self.muvarpath)/(self.key+'.h5')\n # if path2file.exists():\n # muvar = ZNorm.loadfile(self, path2file, v)\n # if muvar is None:\n # muvar = ZNorm.cal_muvar(path2file, v, num_dims=num_dims)\n # if verbose:\n # print('Calculating Mean-Variance for {}'.format(v))\n # else:\n # if verbose:\n # print('Loading Mean-Variance for {}'.format(v))\n # else:\n # muvar = ZNorm.cal_muvar(path2file, v, num_dims=num_dims)\n # if verbose:\n # print('Calculating Mean-Variance for {}'.format(v))\n # self.variable_dict[v] = muvar\n\n pre = ZNorm(variable_list, key=key, data=self.data, verbose=False)\n self.variable_dict = pre.variable_dict\n for var in variable_list:\n if var in ['pose/data', 'pose/normalize']:\n self.output_modality = var\n break\n else:\n raise ValueError('pose variable not found in variable_list')\n\n\n self.hdf5 = HDF5()\n\n path2file = Path(savepath)/(self.key+'.h5')\n self.centers = None\n key_name = 'centers/{}'.format(self.num_clusters) + '_{}'*len(self.feats)\n key_name = key_name.format(*self.feats)\n key_name += '_{}'*len(self.mask)\n key_name = key_name.format(*self.mask)\n key_name += '_{}'\n key_name = key_name.format('_'.join(self.variable.split('/')))\n\n if path2file.exists():\n if self.hdf5.isDatasetInFile(path2file, key_name):\n self.centers, h5 = self.hdf5.load(path2file, key_name)\n self.centers = self.centers[()]\n h5.close()\n if verbose:\n print('Loading KMeans model for {}/{}'.format(key, key_name))\n else:\n if verbose:\n print('Calculating KMeans model for {}/{}'.format(key, key_name))\n with torch.no_grad():\n self.centers = self.get_kmeans()\n self.hdf5.append(path2file, key_name, self.centers)\n else:\n if verbose:\n print('Calculating KMeans model for {}/{}'.format(key, key_name))\n with torch.no_grad():\n self.centers = self.get_kmeans()\n self.hdf5.append(path2file, key_name, self.centers)\n\n self.centers = torch.from_numpy(self.centers)\n\n\n def get_feats(self, x):\n pose_list = []\n for feat in self.feats:\n if feat == 'pose':\n pose_list.append(x)\n if feat == 'velocity':\n pose_v = torch.zeros_like(x)\n pose_v[:, 1:, :] = x[:, 1:] - x[:, :-1]\n pose_list.append(pose_v)\n if feat == 'speed':\n pose_s = torch.zeros_like(x)\n pose_s[:, 1:, :] = x[:, 1:] - x[:, :-1]\n pose_s = pose_s.reshape(pose_s.shape[0], pose_s.shape[1], 2, -1)\n pose_s = (pose_s**2).sum(dim=-2) ** 0.5 ## calculating speed from velocity\n pose_list.append(pose_s)\n
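## acceleration is computed below as the second finite difference of the pose (the difference of the velocity)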
if feat == 'acceleration':\n pose_v = torch.zeros_like(x)\n pose_v[:, 1:, :] = x[:, 1:] - x[:, :-1]\n pose_a = torch.zeros_like(x)\n pose_a[:, 1:, :] = pose_v[:, 1:] - pose_v[:, :-1]\n #pose_a = (pose_a**2).sum(dim=-2) ** 0.5 ## calculating speed from velocity\n pose_list.append(pose_a)\n if feat == 'spatial':\n mean = self.variable_dict[self.output_modality][0][:,:,8:]\n pose_se = torch.zeros_like(x)\n pose_se = x - mean # just the elbow joints?\n pose_list.append(pose_se)\n return torch.cat(pose_list, dim=-1)\n\n def get_kmeans(self):\n model = sklearn.cluster.MiniBatchKMeans(n_clusters=self.num_clusters)\n collate_fn = self.data.collate_fn\n ## sample through the complete dataset irrespective of the sampler\n dataloader = torch.utils.data.DataLoader(self.data.dataset, batch_size=32, collate_fn=collate_fn)\n\n for batch in tqdm(dataloader):\n pose = batch[self.variable]\n pose = self.remove_joints(pose)\n pose = self.get_feats(pose)\n pose = pose.view(-1, pose.shape[-1])\n model.partial_fit(pose)\n centers = model.cluster_centers_\n return centers\n\n def predict(self, x, **kwargs):\n x = x.double()\n x = self.get_feats(x)\n x_shape = list(x.shape)\n x = x.view(-1, 1, x_shape[-1])\n centers_shape = [1] + list(self.centers.shape)\n\n mse = ((self.centers.view(*centers_shape).to(x.device) - x)**2).sum(dim=-1)\n if kwargs:\n if kwargs['soft_labels']:\n labels = torch.nn.functional.softmax(-mse/mse.mean(-1).unsqueeze(-1), dim=-1).view(x_shape[:-1] + [centers_shape[1]])\n else:\n labels = mse.min(dim=-1)[1].view(x_shape[:-1])\n else:\n labels = mse.min(dim=-1)[1].view(x_shape[:-1])\n return labels\n\n def inv_predict(self, y, **kwargs):\n y_shape = list(y.shape) + [self.centers.shape[-1]]\n y = y.view(-1)\n return self.centers.to(y.device)[y].view(*y_shape)\n\n def update(self, batch):\n pass\n\n def __call__(self, batch, inv=False, **kwargs):\n if not inv:\n return self.predict(batch, **kwargs)\n else:\n return self.inv_predict(batch, **kwargs)\n\n def __repr__(self):\n return self.__class__.__name__ + '(variable_list={}, key={})'.format(self.variable, self.key)\n\n## Incomplete, need to implement get_kmeans, predict and inverse_predict\nclass GMM(KMeans):\n def __init__(self, variable_list = [], savepath='./preprocessing/kmeans', key='key', data=None, num_clusters=8, mask=[0, 7, 8, 9], feats=['pose', 'velocity'], verbose=True):\n super().__init__(variable_list=variable_list, savepath='./preprocessing/gmm', key=key, data=data, num_clusters=num_clusters, mask=mask, feats=feats, verbose=verbose)\n\n def get_kmeans(self):\n model = sklearn.mixture.GaussianMixture(n_components=self.num_clusters)\n poses = []\n for batch in tqdm(self.data):\n pose = batch[self.variable]\n pose = self.remove_joints(pose)\n pose = self.get_feats(pose)\n pose = pose.view(-1, pose.shape[-1])\n poses.append(pose)\n pose = torch.cat(poses, dim=0)\n model.fit(pose)\n centers = model.means_\n return centers\n
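## NOTE: predict/inv_predict below reuse the KMeans nearest-centre lookup on the fitted means; Gaussian posteriors are not used yet (see the 'Incomplete' note above)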
def predict(self, x, **kwargs):\n x = x.double()\n x = self.get_feats(x)\n x_shape = list(x.shape)\n x = x.view(-1, 1, x_shape[-1])\n centers_shape = [1] + list(self.centers.shape)\n\n mse = ((self.centers.view(*centers_shape).to(x.device) - x)**2).sum(dim=-1)\n if kwargs:\n if kwargs['soft_labels']:\n labels = torch.nn.functional.softmax(-mse/mse.mean(-1).unsqueeze(-1), dim=-1).view(x_shape[:-1] + [centers_shape[1]])\n else:\n labels = mse.min(dim=-1)[1].view(x_shape[:-1])\n else:\n labels = mse.min(dim=-1)[1].view(x_shape[:-1])\n return labels\n\n def inv_predict(self, y, **kwargs):\n y_shape = list(y.shape) + [self.centers.shape[-1]]\n y = y.view(-1)\n return self.centers.to(y.device)[y].view(*y_shape)\n\n\nclass POSCluster(POStagging):\n def __init__(self):\n super().__init__()\n self.labels = None\n\n def update(self, batch):\n if 'text/pos' in batch:\n self.labels = batch.get('text/pos').long()\n else:\n raise KeyError('add `text/pos` in args.modalities')\n\n def __call__(self, batch, inv=False, **kwargs):\n if self.labels is not None:\n return self.labels\n else:\n raise RuntimeError('call POSCluster.update before calling the object')\n\nclass Translate():\n def __init__(self, offset):\n self.offset = offset\n\n def __call__(self, batch, inv=False):\n if inv:\n return batch - self.offset\n else:\n return batch + self.offset\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\nclass RandomTranslate():\n def __init__(self, max=[100, 50], mask=[0,7,8,9], skel=None, random=1, znorm=None, output_modality='pose/normalize'):\n self.max = torch.Tensor(max).double().view(1,1,2,1)\n self.mask = mask\n self.skel = skel\n self.joint_L = self.skel.joint_left(self.mask)\n self.joint_R = self.skel.joint_right(self.mask)\n self.random = random\n self.znorm = znorm\n self.output_modality = output_modality\n\n def __call__(self, batch, inv=False):\n rand_fn = getattr(torch, 'rand') if self.random else getattr(torch, 'ones')\n offset_L = (rand_fn(1,1,2,1)*2 - 1) * self.max\n offset_R = (rand_fn(1,1,2,1)*2 - 1) * self.max\n\n if self.znorm is not None:\n offset_L = self.scale_translation(offset_L, self.joint_L)\n offset_R = self.scale_translation(offset_R, self.joint_R)\n \n batch = batch.view(batch.shape[0], batch.shape[1], 2, -1)\n with torch.no_grad():\n batch[..., self.joint_L] += offset_L.to(batch.device)\n batch[..., self.joint_R] += offset_R.to(batch.device)\n\n return batch.view(batch.shape[0], batch.shape[1], -1)\n\n def scale_translation(self, x, mask, eps=1e-8):\n var = self.znorm.variable_dict[self.output_modality][1].view(1, 1, 2, -1)[..., mask]\n mask_std = (var >= 0).to(torch.double)\n std = (var*mask_std)**0.5\n mask = (std == 0).to(torch.double)\n std = (mask * eps) + (1-mask)*std\n return x/std\n #return x*(var**0.5)\n\nclass Relative2Parent():\n def __init__(self, parents=None):\n if parents is None:\n self.parents = Skeleton2D().parents\n else:\n self.parents = parents\n\n def inv(self, pose):\n for i, parent in enumerate(self.parents[1:]):\n pose[..., i+1] += pose[..., parent]\n return pose\n\n def __call__(self, batch, inv=False):\n batch_new = {}\n for key in batch:\n if 'pose' in key:\n pose = batch[key].clone()\n pose = pose.view(pose.shape[0], pose.shape[1], 2, -1)\n pose[..., 0] = 0\n if inv:\n pose = self.inv(pose)\n else:\n pose[..., 1:] = pose[..., 1:] - pose[..., self.parents[1:]]\n pose[..., 0] = batch[key].view(pose.shape[0], pose.shape[1], 2, -1)[..., 0]\n pose = pose.view(pose.shape[0], pose.shape[1], -1)\n batch_new[key] = pose\n else:\n batch_new[key] = batch[key]\n\n return batch_new\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\nclass RemoveJoints():\n def __init__(self, mask, parents=None):\n self.mask = mask\n self.parents = parents\n #self.children = self.get_children()\n self.insert = None\n\n def get_children(self):\n if self.parents is None:\n return None\n children = {}\n for i, parent in enumerate(self.parents):\n if parent in children:\n children[parent].append(i)\n else:\n children[parent] = [i]\n return children\n\n def __call__(self, batch, inv=False, **kwargs):\n if inv:\n assert self.insert is not None, 'Call Remove Joints first before calling the inverse version'\n batch_cap = 
torchUtils.add_slices(batch.view(batch.shape[0], batch.shape[1], 2, -1),\n insert=self.insert,\n mask=self.mask,\n dim=-1)\n if self.parents is not None and 'batch_gt' in kwargs:\n ## Bring masked children close enough to the predicted parents for a better visualization.\n batch_gt = kwargs['batch_gt']\n batch_gt = batch_gt.view(batch_gt.shape[0], batch_gt.shape[1], 2, -1)\n for i in self.mask: ## must be in topological order\n if i != 0: ## ignore first joint\n j = self.parents[i]\n batch_cap[..., i] = (batch_gt[..., i] - batch_gt[..., j]) + batch_cap[..., j]\n\n else:\n batch = batch.view(batch.shape[0], batch.shape[1], 2, -1)\n batch, insert = torchUtils.remove_slices(batch, mask=self.mask, dim=-1)\n ## use save_insert=False to not save insert to self.insert\n if kwargs.get('save_insert') is None or kwargs.get('save_insert') is True:\n self.insert = insert\n self.insert = self.insert.to('cpu')\n\n batch_cap = batch\n\n return batch_cap.view(batch_cap.shape[0], batch_cap.shape[1], -1)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mask={})'.format(self.mask)\n\n\nif __name__ == '__main__':\n variable_list = ['pose', 'audio']\n data = DataLoader(DummyData(variable_list=variable_list), batch_size=1000)\n pre = ZNorm(['pose'], savepath='./preprocessing_temp', data=data)\n for batch in data:\n break\n mean = pre(batch)['pose'].mean()\n std = pre(batch)['pose'].std()\n print('Mean: {}, std: {} after Ztransform'.format(mean, std))\n" ]
[ [ "torch.linspace", "torch.ones", "torch.Tensor", "torch.cat", "torch.zeros", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.zeros_like", "torch.no_grad", "numpy.array" ] ]
sarmstr5/kaggle_intel_mobleODT_cervix_classification
[ "800e713a30827fc3701525b8e64c39ae97b7634a" ]
[ "src/data_explore.py" ]
[ "import platform\nimport os, pickle\nimport pandas as pd\n\n\n\n\ndef get_file_paths():\n if 'c001' in platform.node(): \n# colfax cluster\n abspath_dataset_dir_train_1 = '/data/kaggle/train/Type_1'\n abspath_dataset_dir_train_2 = '/data/kaggle/train/Type_2'\n abspath_dataset_dir_train_3 = '/data/kaggle/train/Type_3'\n abspath_dataset_dir_test = '/data/kaggle/test/'\n abspath_dataset_dir_add_1 = '/data/kaggle_3.27/additional/Type_1_v2'\n abspath_dataset_dir_add_2 = '/data/kaggle_3.27/additional/Type_2_v2'\n abspath_dataset_dir_add_3 = '/data/kaggle_3.27/additional/Type_3_v2'\n# local machine\n elif '.local' in platform.node():\n abspath_dataset_dir_train_1 = '/abspath/to/train/Type_1'\n abspath_dataset_dir_train_2 = '/abspath/to/train/Type_2'\n abspath_dataset_dir_train_3 = '/abspath/to/train/Type_3'\n abspath_dataset_dir_test = '/abspath/to/test/'\n abspath_dataset_dir_add_1 = '/abspath/to/additional/Type_1'\n abspath_dataset_dir_add_2 = '/abspath/to/additional/Type_2'\n abspath_dataset_dir_add_3 = '/abspath/to/additional/Type_3'\n else:\n# kaggle kernel\n abspath_dataset_dir_train_1 = '/kaggle/input/train/Type_1'\n abspath_dataset_dir_train_2 = '/kaggle/input/train/Type_2'\n abspath_dataset_dir_train_3 = '/kaggle/input/train/Type_3'\n abspath_dataset_dir_test = '/kaggle/input/test/'\n abspath_dataset_dir_add_1 = '/kaggle/input/additional/Type_1'\n abspath_dataset_dir_add_2 = '/kaggle/input/additional/Type_2'\n abspath_dataset_dir_add_3 = '/kaggle/input/additional/Type_3'\n\n return abspath_dataset_dir_train_1, abspath_dataset_dir_train_2, abspath_dataset_dir_train_3, abspath_dataset_dir_test, abspath_dataset_dir_add_1, abspath_dataset_dir_add_2, abspath_dataset_dir_add_3\n\n\ndef get_list_abspath_img(abspath_dataset_dir):\n list_abspath_img = []\n for str_name_file_or_dir in os.listdir(abspath_dataset_dir):\n if ('.jpg' in str_name_file_or_dir) == True:\n list_abspath_img.append(os.path.join(abspath_dataset_dir, str_name_file_or_dir))\n list_abspath_img.sort()\n return list_abspath_img\n\ndef save_dirs(dir, img_dirs, type='df'):\n if type == 'df':\n img_dirs.to_csv(dir, index=False)\n else:\n with open(dir, 'wb') as file:\n pickle.dump(dir_list, file)\n\n\ndef save_img_dirs():\n# dir = '~/kaggle_code/'\n# Location of the colfax cluster data\n dir = os.path.join(os.pardir, 'data/')\n\n# returns the directories of the data files\n dir_train_1, dir_train_2, dir_train_3, dir_test, dir_add_1, dir_add_2, dir_add_3 = get_file_paths()\n print('got list dirs')\n\n# Lists of the image paths\n list_abspath_img_train_1 = get_list_abspath_img(dir_train_1)\n train_1_labels = [1]*len(list_abspath_img_train_1)\n list_abspath_img_train_2 = get_list_abspath_img(dir_train_2)\n train_2_labels = [2]*len(list_abspath_img_train_2)\n list_abspath_img_train_3 = get_list_abspath_img(dir_train_3)\n train_3_labels = [3]*len(list_abspath_img_train_3)\n train_lists = list_abspath_img_train_1 + list_abspath_img_train_2 + list_abspath_img_train_3\n train_labels = train_1_labels + train_2_labels + train_3_labels \n\n# Create/Save train df\n t_dict = {'paths': pd.Series(train_lists),\n 'labels':pd.Series(train_labels)}\n train_df = pd.DataFrame(t_dict)\n save_dirs(os.path.join(dir,'train.csv'), train_df, 'df')\n print('saved train dirs')\n\n# Create/Save test df\n test_list = get_list_abspath_img(dir_test)\n test_df = pd.DataFrame({'paths' : pd.Series(test_list)})\n save_dirs(os.path.join(dir,'test.csv'), test_df, 'df')\n print('saved test dirs')\n\n# Create/Save addtionals df\n list_abspath_img_add_1 = 
# Create/Save test df\n test_list = get_list_abspath_img(dir_test)\n test_df = pd.DataFrame({'paths' : pd.Series(test_list)})\n save_dirs(os.path.join(dir,'test.csv'), test_df, 'df')\n print('saved test dirs')\n\n# Create/Save additionals df\n list_abspath_img_add_1 = get_list_abspath_img(dir_add_1)\n add_1_labels = [1]*len(list_abspath_img_add_1)\n list_abspath_img_add_2 = get_list_abspath_img(dir_add_2)\n add_2_labels = [2]*len(list_abspath_img_add_2)\n list_abspath_img_add_3 = get_list_abspath_img(dir_add_3)\n add_3_labels = [3]*len(list_abspath_img_add_3)\n add_lists = list_abspath_img_add_1 + list_abspath_img_add_2 + list_abspath_img_add_3\n add_labels = add_1_labels + add_2_labels + add_3_labels\n add_dict = {'paths': pd.Series(add_lists),\n 'labels':pd.Series(add_labels)}\n add_df = pd.DataFrame(add_dict)\n save_dirs(os.path.join(dir,'additionals.csv'), add_df, 'df')\n print('saved additional dirs')\n\n# currently doesn't work\ndef create_count_df():\n # 0: Type_1, 1: Type_2, 2: Type_3\n list_answer_train = [0] * len(list_abspath_img_train_1) + [1] * len(list_abspath_img_train_2) + [2] * len(list_abspath_img_train_3)\n list_answer_add = [0] * len(list_abspath_img_add_1) + [1] * len(list_abspath_img_add_2) + [2] * len(list_abspath_img_add_3)\n\n# print(list_abspath_img_train_1[0:2])\n\n pandas_columns = ['Number of image files']\n pandas_index = ['train_1', 'train_2','train_3', 'train', 'test', 'add_1',\n 'add_2', 'add_3', 'add', 'train + add', 'total']\n pandas_data = [len(list_abspath_img_train_1), len(list_abspath_img_train_2),\n len(list_abspath_img_train_3), len(list_abspath_img_train),\n len(list_abspath_img_test), len(list_abspath_img_add_1),\n len(list_abspath_img_add_2), len(list_abspath_img_add_3),\n len(list_abspath_img_add), len(list_abspath_img_train) +\n len(list_abspath_img_add), len(list_abspath_img_train) +\n len(list_abspath_img_test) + len(list_abspath_img_add)]\n # counting number of image files\n df = pd.DataFrame(pandas_data, index=pandas_index, columns=pandas_columns)\n pandas_columns = ['Type_1', 'Type_2', 'Type_3']\n pandas_index = ['train', 'test', 'add']\n\n ratio_train = [x / len(list_abspath_img_train) for x in\n [len(list_abspath_img_train_1),\n len(list_abspath_img_train_2),\n len(list_abspath_img_train_3)]]\n ratio_test = ['?', '?', '?']\n ratio_add = [x / len(list_abspath_img_add) for x in\n [len(list_abspath_img_add_1), len(list_abspath_img_add_2),\n len(list_abspath_img_add_3)]]\n\n # show type ratios\n pandas_data = [ratio_train, ratio_test, ratio_add]\n\n df2 = pd.DataFrame(pandas_data, index=pandas_index, columns=pandas_columns)\n print(df2)\n\ndef main():\n save_img_dirs()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
BloodAxe/segmentation-networks-benchmark
[ "2e3feb560102230be9369ab442b4a59cc86dff61" ]
[ "lib/models/dilated_linknet.py" ]
[ "from torch import nn\nimport torch\nfrom torchvision import models\nimport torchvision\nfrom torch.nn import functional as F\n\nfrom lib.models.dilated_resnet import dilated_resnet34\n\n\nclass DecoderBlockLinkNet(nn.Module):\n def __init__(self, in_channels, n_filters):\n super().__init__()\n\n self.relu = nn.ReLU(inplace=True)\n\n # B, C, H, W -> B, C/4, H, W\n self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)\n self.norm1 = nn.BatchNorm2d(in_channels // 4)\n\n # B, C/4, H, W -> B, C/4, 2 * H, 2 * W\n self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=4,\n stride=2, padding=1, output_padding=0)\n self.norm2 = nn.BatchNorm2d(in_channels // 4)\n\n # B, C/4, H, W -> B, C, H, W\n self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)\n self.norm3 = nn.BatchNorm2d(n_filters)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n x = self.deconv2(x)\n x = self.norm2(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.norm3(x)\n x = self.relu(x)\n return x\n\n\nclass DilatedLinkNet34(nn.Module):\n def __init__(self, num_classes=1, num_channels=3, pretrained=True):\n super().__init__()\n assert num_channels == 3\n self.num_classes = num_classes\n filters = [64, 128, 256, 512]\n resnet = dilated_resnet34(pretrained=pretrained)\n\n self.firstconv = resnet.conv1\n self.firstbn = resnet.bn1\n self.firstrelu = resnet.relu\n self.firstmaxpool = resnet.maxpool\n self.encoder1 = resnet.layer1\n self.encoder2 = resnet.layer2\n self.encoder3 = resnet.layer3\n self.encoder4 = resnet.layer4\n\n # Decoder\n self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])\n self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])\n self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])\n self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])\n\n # Final Classifier\n self.finaldrop1 = nn.Dropout2d(p=0.5)\n self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)\n self.finalrelu1 = nn.ReLU(inplace=True)\n self.finalconv2 = nn.Conv2d(32, 32, 3)\n self.finalrelu2 = nn.ReLU(inplace=True)\n self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)\n\n # noinspection PyCallingNonCallable\n def forward(self, x):\n # Encoder\n x = self.firstconv(x)\n x = self.firstbn(x)\n x = self.firstrelu(x)\n x = self.firstmaxpool(x)\n e1 = self.encoder1(x)\n e2 = self.encoder2(e1)\n e3 = self.encoder3(e2)\n e4 = self.encoder4(e3)\n\n # Decoder with Skip Connections\n d4 = self.decoder4(e4) + e3\n d3 = self.decoder3(d4) + e2\n d2 = self.decoder2(d3) + e1\n d1 = self.decoder1(d2)\n\n # Final Classification\n # d1 = self.finaldrop1(d1) # Added dropout\n f1 = self.finaldeconv1(d1)\n f2 = self.finalrelu1(f1)\n f3 = self.finalconv2(f2)\n f4 = self.finalrelu2(f3)\n f5 = self.finalconv3(f4)\n\n return f5\n" ]
[ [ "torch.nn.Dropout2d", "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
manishgit138/numcodecs
[ "322baaca4162c43a6e37d4f4c0d0f0fd51faa554" ]
[ "numcodecs/tests/test_shuffle.py" ]
[ "from multiprocessing import Pool\nfrom multiprocessing.pool import ThreadPool\n\n\nimport numpy as np\nimport pytest\n\n\ntry:\n from numcodecs.shuffle import Shuffle\nexcept ImportError: # pragma: no cover\n pytest.skip(\n \"numcodecs.shuffle not available\", allow_module_level=True\n )\n\n\nfrom numcodecs.tests.common import (check_encode_decode,\n check_config,\n check_backwards_compatibility)\n\n\ncodecs = [\n Shuffle(),\n Shuffle(elementsize=0),\n Shuffle(elementsize=4),\n Shuffle(elementsize=8)\n]\n\n\n# mix of dtypes: integer, float, bool, string\n# mix of shapes: 1D, 2D, 3D\n# mix of orders: C, F\narrays = [\n np.arange(1000, dtype='i4'),\n np.linspace(1000, 1001, 1000, dtype='f8'),\n np.random.normal(loc=1000, scale=1, size=(100, 10)),\n np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order='F'),\n np.random.choice([b'a', b'bb', b'ccc'], size=1000).reshape(10, 10, 10),\n np.random.randint(0, 2**60, size=1000, dtype='u8').view('M8[ns]'),\n np.random.randint(0, 2**60, size=1000, dtype='u8').view('m8[ns]'),\n np.random.randint(0, 2**25, size=1000, dtype='u8').view('M8[m]'),\n np.random.randint(0, 2**25, size=1000, dtype='u8').view('m8[m]'),\n np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[ns]'),\n np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[ns]'),\n np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[m]'),\n np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[m]'),\n]\n\n\[email protected]('array', arrays)\[email protected]('codec', codecs)\ndef test_encode_decode(array, codec):\n check_encode_decode(array, codec)\n\n\ndef test_config():\n codec = Shuffle()\n check_config(codec)\n codec = Shuffle(elementsize=8)\n check_config(codec)\n\n\ndef test_repr():\n expect = \"Shuffle(elementsize=0)\"\n actual = repr(Shuffle(elementsize=0))\n assert expect == actual\n expect = \"Shuffle(elementsize=4)\"\n actual = repr(Shuffle(elementsize=4))\n assert expect == actual\n expect = \"Shuffle(elementsize=8)\"\n actual = repr(Shuffle(elementsize=8))\n assert expect == actual\n expect = \"Shuffle(elementsize=16)\"\n actual = repr(Shuffle(elementsize=16))\n assert expect == actual\n\n\ndef test_eq():\n assert Shuffle() == Shuffle()\n assert Shuffle(elementsize=16) != Shuffle()\n\n\ndef _encode_worker(data):\n compressor = Shuffle()\n enc = compressor.encode(data)\n return enc\n\n\ndef _decode_worker(enc):\n compressor = Shuffle()\n data = compressor.decode(enc)\n return data\n\n\[email protected]('pool', (Pool, ThreadPool))\ndef test_multiprocessing(pool):\n data = np.arange(1000000)\n enc = _encode_worker(data)\n\n pool = pool(5)\n\n # test with process pool and thread pool\n\n # test encoding\n enc_results = pool.map(_encode_worker, [data] * 5)\n assert all([len(enc) == len(e) for e in enc_results])\n\n # test decoding\n dec_results = pool.map(_decode_worker, [enc] * 5)\n assert all([data.nbytes == len(d) for d in dec_results])\n\n # tidy up\n pool.close()\n pool.join()\n\n\ndef test_backwards_compatibility():\n check_backwards_compatibility(Shuffle.codec_id, arrays, codecs)\n\n\n# def test_err_decode_object_buffer():\n# check_err_decode_object_buffer(Shuffle())\n\n\n# def test_err_encode_object_buffer():\n# check_err_encode_object_buffer(Shuffle())\n\n# def test_decompression_error_handling():\n# for codec in codecs:\n# with pytest.raises(RuntimeError):\n# codec.decode(bytearray())\n# with pytest.raises(RuntimeError):\n# codec.decode(bytearray(0))\n\ndef test_expected_result():\n # Each byte 
of the 4 byte uint64 is shuffled in such a way\n # that for an array of length 4, the last byte of the last\n # element becomes the first byte of the first element\n # therefore [0, 0, 0, 1] becomes [2**((len-1)*8), 0, 0, 0]\n # (where 8 = bits in a byte)\n arr = np.array([0, 0, 0, 1], dtype='uint64')\n codec = Shuffle(elementsize=arr.data.itemsize)\n enc = codec.encode(arr)\n assert np.frombuffer(enc.data, arr.dtype)[0] == 2**((len(arr)-1)*8)\n\n\ndef test_incompatible_elementsize():\n with pytest.raises(ValueError):\n arr = np.arange(1001, dtype='u1')\n codec = Shuffle(elementsize=4)\n codec.encode(arr)\n" ]
[ [ "numpy.linspace", "numpy.random.choice", "numpy.arange", "numpy.frombuffer", "numpy.random.normal", "numpy.array", "numpy.random.randint" ] ]
beark007/smarts_ppo
[ "8f6aa33a6fcfb74dc0b8e92951d6b70d6e2874de" ]
[ "benchmark/agents/maac/tf_policy.py" ]
[ "# MIT License\n#\n# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\nCentralized A2C policy\n\"\"\"\nimport numpy as np\n\nfrom collections import OrderedDict\nfrom gym import spaces\n\nfrom ray.rllib.models.preprocessors import Preprocessor\nfrom ray.rllib.agents.trainer_template import build_trainer\nfrom ray.rllib.agents.trainer import with_common_config\nfrom ray.rllib.agents.a3c.a3c_tf_policy import A3CLoss\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.tf_policy_template import build_tf_policy\nfrom ray.rllib.policy.tf_policy import TFPolicy\nfrom ray.rllib.utils import try_import_tf\nfrom ray.rllib.utils.tf_ops import make_tf_callable, explained_variance\nfrom ray.rllib.evaluation.postprocessing import Postprocessing, compute_advantages\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.modelv2 import ModelV2\n\nfrom benchmark.networks import CentralizedActorCriticModel\n\n\ntf1, tf, tfv = try_import_tf()\n\n\nclass CentralizedValueMixin:\n def __init__(self: TFPolicy):\n self.compute_central_vf = make_tf_callable(\n self.get_session(), dynamic_shape=True\n )(self.model.central_value_function)\n\n\ndef build_cac_model(\n policy: TFPolicy, obs_space: spaces.Space, action_space: spaces.Space, config\n) -> ModelV2:\n policy.model = ModelCatalog.get_model_v2(\n obs_space=obs_space,\n action_space=action_space,\n num_outputs=action_space.n\n if isinstance(action_space, spaces.Discrete)\n else np.product(action_space.shape),\n model_config=config[\"model\"],\n framework=\"tf\",\n default_model=CentralizedActorCriticModel,\n name=\"cac\",\n )\n\n return policy.model\n\n\ndef get_action_buffer(\n action_space: spaces.Space,\n action_preprocessor: Preprocessor,\n batch: SampleBatch,\n copy_length: int,\n):\n if isinstance(action_space, spaces.Discrete):\n buffer_action = np.eye(action_preprocessor.size)[\n batch[SampleBatch.ACTIONS][:copy_length]\n ]\n elif isinstance(action_space, spaces.Box):\n buffer_action = batch[SampleBatch.ACTIONS][:copy_length]\n else:\n raise NotImplementedError(\n f\"Do not support such an action space yet: {action_space}\"\n )\n return buffer_action\n\n\ndef postprocess_trajectory(\n policy: TFPolicy, sample_batch: SampleBatch, other_agent_batches=None, episode=None\n):\n last_r = 0.0\n batch_length = len(sample_batch[SampleBatch.CUR_OBS])\n critic_preprocessor = policy.model.critic_preprocessor\n action_preprocessor = 
policy.model.act_preprocessor\n obs_preprocessor = policy.model.obs_preprocessor\n critic_obs_array = np.zeros((batch_length,) + critic_preprocessor.shape)\n\n offset_slot = action_preprocessor.size + obs_preprocessor.size\n\n if policy.loss_initialized():\n # ordered by agent keys\n other_agent_batches = OrderedDict(other_agent_batches)\n for i, (other_id, (other_policy, batch)) in enumerate(\n other_agent_batches.items()\n ):\n offset = (i + 1) * offset_slot\n copy_length = min(batch_length, batch[SampleBatch.CUR_OBS].shape[0])\n\n # TODO(ming): check the action type\n buffer_action = get_action_buffer(\n policy.action_space, action_preprocessor, batch, copy_length\n )\n oppo_features = np.concatenate(\n [batch[SampleBatch.CUR_OBS][:copy_length], buffer_action], axis=-1\n )\n assert oppo_features.shape[-1] == offset_slot\n critic_obs_array[\n :copy_length, offset : offset + offset_slot\n ] = oppo_features\n\n # fill my features to critic_obs_array\n buffer_action = get_action_buffer(\n policy.action_space, action_preprocessor, sample_batch, batch_length\n )\n critic_obs_array[:batch_length, 0:offset_slot] = np.concatenate(\n [sample_batch[SampleBatch.CUR_OBS], buffer_action], axis=-1\n )\n\n sample_batch[CentralizedActorCriticModel.CRITIC_OBS] = critic_obs_array\n sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(\n sample_batch[CentralizedActorCriticModel.CRITIC_OBS]\n )\n else:\n sample_batch[CentralizedActorCriticModel.CRITIC_OBS] = critic_obs_array\n sample_batch[SampleBatch.VF_PREDS] = np.zeros(\n (batch_length,), dtype=np.float32\n )\n\n train_batch = compute_advantages(\n sample_batch,\n last_r,\n policy.config[\"gamma\"],\n policy.config[\"lambda\"],\n policy.config[\"use_gae\"],\n )\n return train_batch\n\n\ndef ac_loss_func(policy, model, dist_class, train_batch):\n \"\"\" Predefined actor-critic loss reuse.\n \"\"\"\n logits, _ = policy.model.from_batch(train_batch)\n action_dist = dist_class(logits, policy.model)\n\n policy.loss = A3CLoss(\n action_dist,\n train_batch[SampleBatch.ACTIONS],\n train_batch[Postprocessing.ADVANTAGES],\n train_batch[Postprocessing.VALUE_TARGETS],\n policy.model.central_value_function(\n train_batch[CentralizedActorCriticModel.CRITIC_OBS]\n ),\n policy.config[\"vf_loss_coeff\"],\n policy.config[\"entropy_coeff\"],\n )\n\n return policy.loss.total_loss\n\n\ndef setup_mixins(policy, obs_space, action_space, config):\n CentralizedValueMixin.__init__(policy)\n\n\ndef stats(policy, train_batch):\n return {\n \"policy_loss\": policy.loss.pi_loss,\n \"policy_entropy\": policy.loss.entropy,\n \"vf_loss\": policy.loss.vf_loss,\n }\n\n\ndef central_vf_stats(policy, train_batch, grads):\n # Report the explained variance of the central value function.\n return {\n \"grad_gnorm\": tf.linalg.global_norm(grads),\n \"vf_explained_var\": explained_variance(\n train_batch[Postprocessing.VALUE_TARGETS], policy.model.value_function()\n ),\n }\n\n\nDEFAULT_CONFIG = with_common_config(\n {\n \"gamma\": 0.95,\n \"lambda\": 1.0, # only used when use_gae=True\n \"use_gae\": False,\n \"vf_loss_coeff\": 0.5,\n \"entropy_coeff\": 0.01,\n \"truncate_episodes\": True,\n \"use_critic\": True,\n \"grad_clip\": 40.0,\n \"lr\": 0.0001,\n \"min_iter_time_s\": 5,\n \"sample_async\": True,\n \"lr_schedule\": None,\n }\n)\n\n\nCA2CTFPolicy = build_tf_policy(\n name=\"CA2CTFPolicy\",\n stats_fn=stats,\n grad_stats_fn=central_vf_stats,\n loss_fn=ac_loss_func,\n postprocess_fn=postprocess_trajectory,\n before_loss_init=setup_mixins,\n make_model=build_cac_model,\n
mixins=[CentralizedValueMixin],\n get_default_config=lambda: DEFAULT_CONFIG,\n)\n\n\nCA2CTrainer = build_trainer(\n name=\"CA2C\", default_policy=CA2CTFPolicy, default_config=DEFAULT_CONFIG\n)\n" ]
[ [ "numpy.product", "numpy.eye", "numpy.concatenate", "numpy.zeros_like", "numpy.zeros" ] ]
thcasey3/DeerLab
[ "27ff814fb8c2771740bd7928f283c1f70e88215c" ]
[ "deerlab/model.py" ]
[ "# model.py - DeerLab's modelling interface\r\n# ---------------------------------------------------------------------------\r\n# This file is a part of DeerLab. License is MIT (see LICENSE.md). \r\n# Copyright(c) 2019-2021: Luis Fabregas, Stefan Stoll and other contributors.\r\n\r\nimport numpy as np\r\nfrom scipy.sparse.construct import block_diag\r\nfrom scipy.optimize import fminbound\r\nfrom deerlab.solvers import snlls\r\nfrom deerlab.classes import FitResult, UQResult\r\nfrom deerlab.noiselevel import noiselevel\r\nfrom deerlab.bootstrap_analysis import bootstrap_analysis\r\nfrom deerlab.utils import formatted_table, parse_multidatasets\r\nimport inspect \r\nfrom copy import copy,deepcopy\r\nfrom types import ModuleType\r\nimport difflib\r\n\r\n#===================================================================================\r\nclass Parameter(): \r\n r\"\"\" Represents a model parameter or a single parameter vector. \r\n\r\n Attributes\r\n ----------\r\n name : string \r\n Name of the parameter\r\n\r\n description : string \r\n Description of the parameter\r\n\r\n units : string \r\n Physical units of the parameter\r\n\r\n par0 : float or array_like \r\n Value at which to initialize the parameter at the start of a fit routine. \r\n Must be specified in the model or latest upon fitting.\r\n\r\n lb : float or array_like \r\n Lower bound of the parameter. If not specified it is assumed to unbounded.\r\n\r\n ub : float or array_like \r\n Upper bound of the parameter. If not specified it is assumed to unbounded.\r\n\r\n linear : boolean \r\n Describes whether the model behaves linearly with respect to the parameter.\r\n\r\n frozen : boolean \r\n Describes whether the parameter will be frozen at a specific value during a fit.\r\n\r\n value : float\r\n Value at which the parameter will be frozen during a fit.\r\n\r\n\r\n Methods\r\n -------\r\n\r\n \"\"\"\r\n\r\n #=======================================================================================\r\n # Constructor\r\n #=======================================================================================\r\n\r\n #---------------------------------------------------------------------------------------\r\n def __init__(self, name=None, parent=None, idx=None, description=None, par0=None, frozen=False, lb=-np.inf, ub=np.inf,value=None, units=None, linear=False): \r\n # Attributes\r\n self.name = name\r\n self._parent = parent # Parent \r\n self.idx = idx\r\n self.description = description # Description\r\n self.units = units # Units\r\n self.par0 = par0 # Start values\r\n self.lb = lb # Lower bounds\r\n self.ub = ub # Upper bounds\r\n self.value = value\r\n self.frozen = frozen # Frozen\r\n self.linear = linear # Linearity\r\n #---------------------------------------------------------------------------------------\r\n\r\n #=======================================================================================\r\n # Methods\r\n #=======================================================================================\r\n\r\n #---------------------------------------------------------------------------------------\r\n def set(self,**attributes):\r\n \"\"\"\r\n Set one or multiple attributes for a parameter. See list of attributes for a reference list. 
\r\n\r\n Parameters\r\n ----------\r\n attributes : keyword/values pairs\r\n Pairs of keywords defining the parameter attribute and the value to be assignes.\r\n\r\n Examples\r\n --------\r\n Setting a parameter's start value ::\r\n\r\n parameter.set(par0=0.5)\r\n\r\n Setting both the lower bound and upper bound values :: \r\n\r\n parameter.set(lb=0, ub=1)\r\n\r\n \"\"\"\r\n for key in attributes:\r\n if not hasattr(self,key):\r\n raise AttributeError(f\"'{key}' is not a valid parameter attribute.\")\r\n setattr(self,key,attributes[key])\r\n return \r\n #---------------------------------------------------------------------------------------\r\n \r\n #---------------------------------------------------------------------------------------\r\n def freeze(self,value):\r\n \"\"\"\r\n Freeze a parameter during a fit/optimization to a given value. Does not affect model evaluation. \r\n\r\n Parameters\r\n ----------\r\n value : float or array_like\r\n Value at which to freeze the parameter during optimization.\r\n \"\"\"\r\n N = len(np.atleast_1d(self.frozen))\r\n if N>1:\r\n self.frozen = np.full(N,True)\r\n self.value = np.full(N,value) \r\n else:\r\n self.frozen = True\r\n self.value = value \r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def unfreeze(self):\r\n \"\"\"\r\n Release a frozen parameter's value during a fit/optimization. Does not affect model evaluation.\r\n \"\"\"\r\n N = len(np.atleast_1d(self.frozen))\r\n if N>1:\r\n self.frozen = np.full(N,False)\r\n self.value = np.full(N,None) \r\n else:\r\n self.frozen = False\r\n self.value = None\r\n #---------------------------------------------------------------------------------------\r\n#===================================================================================\r\n\r\n\r\n\r\nclass Model():\r\n#===================================================================================\r\n r\"\"\"Represents a model.\r\n\r\n Attributes\r\n ----------\r\n <parameter_name> : :ref:`Parameter` \r\n Model parameter. One :ref:`Parameter` instance is assigned for each\r\n parameter (with name ``<parameter_name>``) in the model. \r\n description : string \r\n Description of the model.\r\n signature : string \r\n Call signature (keyword arguments in order) of the model. \r\n nonlinmodel : callable\r\n Function of the non-linear part of the model.\r\n Nnonlin : int scalar\r\n Number of non-linear parameters in the model.\r\n Nlin : int scalar\r\n Number of linear parameters in the model.\r\n Nparam : int scalar\r\n Number of parameters in the model.\r\n \"\"\"\r\n\r\n\r\n #=======================================================================================\r\n # Constructor\r\n #=======================================================================================\r\n\r\n #---------------------------------------------------------------------------------------\r\n def __init__(self,nonlinfcn,constants=None,signature=None): \r\n \"\"\"\r\n Construct a new model from a non-linear function. \r\n\r\n Parameters\r\n ----------\r\n\r\n nonlinfcn : callable \r\n Function that takes a set of non-linear parameters and \r\n returns either a the full model response or the design matrix \r\n of the model response. A parameter will be added to the new model for each \r\n input argument defined in the function signature. 
\r\n \r\n constants : string or list thereof\r\n Names of the arguments taken by the ``nonlinfcn`` function to be defined as\r\n constants. These will not be added as parameters to the new model.\r\n\r\n signature : list of strings\r\n Signature of the ``nonlinfcn`` function to manually specify the names\r\n of the input arguments. For internal use (mostly).\r\n\r\n Returns\r\n -------\r\n\r\n model : ``Model`` object \r\n Model object instance that takes the parameters defined for ``nonlinfcn``\r\n and returns the output of ``nonlinfcn``.\r\n\r\n \"\"\"\r\n if not callable(nonlinfcn):\r\n Amatrix = nonlinfcn.copy()\r\n nonlinfcn = lambda *_: Amatrix \r\n self.nonlinmodel = nonlinfcn\r\n self.description = None\r\n self._constantsInfo = []\r\n self.parents = None\r\n if signature is None:\r\n # Get list of parameter names from the function signature\r\n signature = inspect.getfullargspec(nonlinfcn).args\r\n\r\n self._nonlinsignature = signature.copy()\r\n parameters = signature.copy()\r\n # Check if one of the arguments is an axis argument \r\n if constants is not None:\r\n if not isinstance(constants,list):\r\n constants = [constants]\r\n for argname in constants:\r\n for n,par in enumerate(parameters): \r\n if par==argname: \r\n self._constantsInfo.append({\"argkey\":argname,'argidx':n})\r\n for argname in constants:\r\n for n,par in enumerate(parameters): \r\n if par==argname: \r\n parameters.remove(argname)\r\n Nconstants = len(self._constantsInfo)\r\n\r\n\r\n # Use a wrapper function to facilitate internal arguments manipulation \r\n #-----------------------------------\r\n def model_with_constants(*inputargs):\r\n constants = inputargs[:Nconstants]\r\n θ = inputargs[Nconstants:]\r\n args = list(θ)\r\n if self._constantsInfo is not None:\r\n for info,constant in zip(self._constantsInfo,constants):\r\n args.insert(info['argidx'],constant)\r\n return nonlinfcn(*args)\r\n #----------------------------------- \r\n self.nonlinmodel = model_with_constants\r\n\r\n # Update the number of parameters in the model\r\n self.Nparam = len(parameters)\r\n self.Nnonlin = len(parameters)\r\n self.Nlin = 0\r\n\r\n for n,param in enumerate(parameters): \r\n newparam = Parameter(parent=self, idx=n, name=param)\r\n setattr(self,param,newparam)\r\n self.signature = signature\r\n #---------------------------------------------------------------------------------------\r\n\r\n # Gets called when an attribute is accessed\r\n #--------------------------------------------------------------------------------\r\n def __getattribute__(self, attr):\r\n try:\r\n return super(Model, self).__getattribute__(attr)\r\n except AttributeError:\r\n errstr = f'The model has no attribute {attr}.'\r\n attributes = [key for key in self.__dict__]\r\n proposal = difflib.get_close_matches(attr, attributes)\r\n if len(proposal)>0:\r\n errstr += f' \\n Did you mean: {proposal} ?'\r\n raise AttributeError(errstr)\r\n #--------------------------------------------------------------------------------\r\n\r\n\r\n #=======================================================================================\r\n # Private methods\r\n #=======================================================================================\r\n\r\n #---------------------------------------------------------------------------------------\r\n def _core_model(self,Amodel,θnonlin,θlin): \r\n \"\"\" \r\n Calculates the core model response ``y`` based on the mathematical expression ``y = A(θnonlin)@θlin``.\r\n \"\"\"\r\n # Calculate the design matrix\r\n if 
len(θnonlin)>0:\r\n A = Amodel(*θnonlin)\r\n else:\r\n A = Amodel()\r\n\r\n # Ensure the proper matrix properties\r\n A = np.atleast_2d(A)\r\n θlin = np.atleast_1d(np.squeeze(θlin))\r\n\r\n # If there are no linear parameters defined\r\n if len(θlin)==0: \r\n θlin = np.array([1])\r\n\r\n if A.shape[1]!=len(θlin): \r\n A = A.T\r\n\r\n # Full model calculation \r\n y = A@θlin\r\n \r\n return y\r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def _parameter_list(self, order='alphabetical'):\r\n \"Get the list of parameters defined in the model sorted alphabetically or by vector definition\"\r\n if order=='alphabetical':\r\n keylist = [param for param in dir(self) if isinstance(getattr(self,param),Parameter)]\r\n elif order=='vector':\r\n keylist = [param for param in dir(self) if isinstance(getattr(self,param),Parameter)]\r\n # If there are any parameters in vector form...\r\n n = 0\r\n for key in keylist: \r\n if isinstance(getattr(self,key).idx,np.ndarray):\r\n # ...insert the key string for the same number of linear parameters in that vector \r\n keylist = np.insert(keylist,n*np.ones(len(np.atleast_1d(getattr(self,key).idx))-1,dtype=int),key) \r\n n += len(np.atleast_1d(getattr(self,key).idx))\r\n\r\n keylist = self._vecsort(keylist)\r\n # Remove any duplicates\r\n keylist = list(dict.fromkeys(keylist))\r\n return keylist\r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def _vecsort(self,list):\r\n \"Sort vectorized parameters attributes from alphabetical ordering to vector indexing\"\r\n list = np.squeeze(np.atleast_1d(list))\r\n indices = np.concatenate([np.atleast_1d(getattr(self,param).idx) for param in dir(self) if isinstance(getattr(self,param),Parameter)])\r\n orderedlist = np.atleast_1d(list.copy())\r\n orderedlist[indices] = list\r\n\r\n return orderedlist\r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def _split_linear(self,variable):\r\n \"Split a vector in non-linear and linear parameter subset vectors\"\r\n variable = np.atleast_1d(variable)\r\n linear = np.concatenate([np.atleast_1d(getattr(self,param).linear) for param in dir(self) if isinstance(getattr(self,param),Parameter)])\r\n linear = self._vecsort(linear)\r\n variable_nonlin = variable[~linear]\r\n variable_lin = variable[linear]\r\n return variable_lin, variable_nonlin\r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def _merge_linear(self,variable_nonlin,variable_lin):\r\n \"Merge a vector's non-linear and linear parameter subset vectors\"\r\n variable = np.zeros(len(variable_nonlin)+len(variable_lin))\r\n linear = np.concatenate([np.atleast_1d(getattr(self,param).linear) for param in dir(self) if isinstance(getattr(self,param),Parameter)])\r\n linear = self._vecsort(linear)\r\n variable[~linear] = variable_nonlin\r\n variable[linear] = variable_lin\r\n return variable\r\n #---------------------------------------------------------------------------------------\r\n\r\n 
#---------------------------------------------------------------------------------------\r\n def _getvector(self,attribute):\r\n \"Get the list of parameters attributes defined in the model sorted alphabetically\"\r\n return np.concatenate([np.atleast_1d(getattr(getattr(self,param),attribute)) for param in dir(self) if isinstance(getattr(self,param),Parameter)])\r\n #---------------------------------------------------------------------------------------\r\n\r\n #-----------------------------------------------------------------------------\r\n def _getparamuq(self,uq_full,paramidx):\r\n \"Get the uncertainty quantification of an individual parameter\"\r\n subset_model = lambda x: x[paramidx]\r\n param_lb = self._vecsort(self._getvector('lb'))[paramidx]\r\n param_ub = self._vecsort(self._getvector('ub'))[paramidx]\r\n frozen = self._vecsort(self._getvector('frozen'))[paramidx]\r\n if np.all(frozen): \r\n param_uq = UQResult('void')\r\n else:\r\n if uq_full.type=='covariance':\r\n param_uq = uq_full.propagate(subset_model,lb=param_lb, ub=param_ub)\r\n elif uq_full.type=='bootstrap':\r\n param_uq = UQResult('bootstrap',data=uq_full.samples[:,paramidx],lb=param_lb, ub=param_ub)\r\n else:\r\n param_uq = UQResult('void')\r\n return param_uq\r\n #-----------------------------------------------------------------------------\r\n\r\n #-----------------------------------------------------------------------------\r\n def _check_if_already_exists(self,key):\r\n if hasattr(self,key):\r\n raise KeyError(f'The model already has a \"{key}\" parameter.')\r\n #-----------------------------------------------------------------------------\r\n \r\n \r\n #=======================================================================================\r\n # Methods\r\n #=======================================================================================\r\n\r\n #---------------------------------------------------------------------------------------\r\n def addnonlinear(self, key, lb=-np.inf, ub=np.inf, par0=None, name=None, units=None, description=None):\r\n \"\"\"\r\n Add a new non-linear parameter (:ref:`Parameter` object) to the model. \r\n\r\n Parameters\r\n ----------\r\n key : string\r\n Identifier of the parameter. This name will be used to refer\r\n to the parameter in the model.\r\n\r\n lb : float or array_like, optional\r\n Lower bound of the parameter. If not specified, it is set to ``-np.inf``.\r\n\r\n ub : float or array_like, optional\r\n Lower bound of the parameter. If not specified, it is set to ``+np.inf``.\r\n\r\n description : string, optional \r\n Descriptrion of the parameter. 
\r\n\r\n units : string, optional\r\n Physical units of the parameter.\r\n \"\"\"\r\n self._check_if_already_exists(key)\r\n idx = self.Nparam\r\n self.Nparam += 1\r\n self.Nnonlin += 1\r\n newparam = Parameter(name=key, linear=False, parent=self, idx=idx, par0=par0, lb=lb, ub=ub, units=units, description=description)\r\n setattr(self,key,newparam)\r\n Nconstants = len(self._constantsInfo)\r\n Amodel = self.nonlinmodel\r\n topop = self.Nnonlin-1\r\n #------------------------------------------------\r\n def model_with_constants_and_added_nonlin(*inputargs):\r\n constants = inputargs[:Nconstants]\r\n θ = inputargs[Nconstants:]\r\n args = list(θ)\r\n args.pop(topop)\r\n if self._constantsInfo is not None:\r\n for info,constant in zip(self._constantsInfo,constants):\r\n args.insert(info['argidx'],constant)\r\n return Amodel(*args)\r\n #------------------------------------------------\r\n self.nonlinmodel = model_with_constants_and_added_nonlin\r\n self.signature.append(key)\r\n #---------------------------------------------------------------------------------------\r\n\r\n\r\n #---------------------------------------------------------------------------------------\r\n def addlinear(self, key, vec=1, lb=-np.inf, ub=np.inf, par0=None, name=None, units=None, description=None):\r\n \"\"\"\r\n Add a new linear parameter (:ref:`Parameter` object) to the model. \r\n\r\n Parameters\r\n ----------\r\n key : string\r\n Identifier of the parameter. This name will be used to refer\r\n to the parameter in the model.\r\n\r\n vec : int scalar, optional\r\n Number of elements in the parameter. If ``vec>1`` then the parameter will represent a \r\n vector of linear parameters of length ``vec``. By default, a scalar parameter is defined.\r\n\r\n lb : float or array_like, optional\r\n Lower bound of the parameter. For vectorized parameters, must be a \r\n vector with ``vec`` elements. If not specified, it is set to ``-np.inf``.\r\n\r\n ub : float or array_like, optional\r\n Lower bound of the parameter. For vectorized parameters, must be a\r\n vector with ``vec`` elements. If not specified, it is set to ``+np.inf``.\r\n\r\n description : string, optional \r\n Descriptrion of the parameter. \r\n\r\n units : string, optional\r\n Physical units of the parameter.\r\n \"\"\"\r\n self._check_if_already_exists(key)\r\n if vec>1: \r\n idx = np.arange(self.Nparam,self.Nparam+vec) \r\n self.Nparam += vec \r\n self.Nlin += vec\r\n newparam = Parameter(name=key, linear=np.full(vec,True), parent=self, idx=idx, par0=np.full(vec,par0), lb=np.full(vec,lb), ub=np.full(vec,ub), value=np.full(vec,None),frozen=np.full(vec,False), units=units, description=description)\r\n else:\r\n idx = self.Nparam\r\n self.Nparam += 1\r\n self.Nlin += 1\r\n newparam = Parameter(name=key, linear=True, parent=self, idx=idx, par0=par0, lb=lb, ub=ub, units=units, description=description)\r\n setattr(self,key,newparam)\r\n self.signature.append(key)\r\n #---------------------------------------------------------------------------------------\r\n\r\n\r\n def __call__(self,*args,**kargs):\r\n #---------------------------------------------------------------------------------------\r\n \"\"\"\r\n Evaluate the model for a given set of parameters. 
\r\n\r\n Takes the constant variables and (non-linear and linear) parameter variables as positional\r\n or keyword arguments and evaluates the model.\r\n \"\"\"\r\n # Check that the correct number of arguments have been specified\r\n Nrequired = len(self._parameter_list())\r\n Nrequired += len(self._constantsInfo)\r\n if (len(args)+len(kargs))!=Nrequired:\r\n raise SyntaxError(f'The model requires {Nrequired} arguments, but {len(args)+len(kargs)} have been specified.')\r\n\r\n # Positional arguments \r\n args_constants = [np.atleast_1d(args[info['argidx']]) for info in self._constantsInfo if info['argidx']<len(args)]\r\n args_list = [np.atleast_1d(arg) for idx,arg in enumerate(args) if idx not in [info['argidx'] for info in self._constantsInfo]]\r\n\r\n # Keyword arguments\r\n kargs_constants = [np.atleast_1d(kargs[info['argkey']]) for info in self._constantsInfo if info['argkey'] in kargs] \r\n kargs_list = [np.atleast_1d(kargs[param]) for param in self._parameter_list(order='vector')[len(args_list):]]\r\n \r\n constants = args_constants + kargs_constants\r\n param_list = args_list + kargs_list\r\n\r\n # Concatenate all parameters into a single vector\r\n θ = np.concatenate(param_list)\r\n\r\n # Check that all parameters have been passed\r\n if len(θ)!=self.Nparam:\r\n raise SyntaxError(f'The model requires {self.Nparam} parameters, but {len(θ)} were specified.') \r\n\r\n # Determine which parameters are linear and which nonlinear\r\n θlin, θnonlin = self._split_linear(θ)\r\n\r\n # Evaluate the core model\r\n y = self._core_model(lambda *θ: self.nonlinmodel(*constants,*θ),θnonlin,θlin)\r\n\r\n # Apply the post-evaluation function to the response, if one is defined\r\n if hasattr(self,'_posteval_fcn'):\r\n y = self._posteval_fcn(y,*constants,*θ)\r\n return y\r\n #---------------------------------------------------------------------------------------\r\n\r\n #---------------------------------------------------------------------------------------\r\n def getmetadata(self):\r\n \"\"\"\r\n Utility function to quickly request all metadata attributes of the model in vector form. \r\n All elements are sorted according to the model function signature.\r\n\r\n Returns\r\n -------\r\n metadata : dict\r\n Dictionary containing all the model's metadata in ordered vectors. The model keys \r\n correspond to the model attributes, e.g. ``metadata['lb']`` corresponds to the model's\r\n parameters' lower boundaries. 
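\r\n\r\n Examples\r\n --------\r\n A minimal, hypothetical sketch (assuming ``mymodel`` is any ``Model`` instance)::\r\n\r\n metadata = mymodel.getmetadata()\r\n metadata['names'] # ordered parameter names\r\n metadata['lb'], metadata['ub'] # boundary vectors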
\r\n \"\"\"\r\n return {\r\n 'names': self._parameter_list(order='vector'),\r\n 'ub' : self._vecsort(self._getvector('ub')),\r\n 'lb' : self._vecsort(self._getvector('lb')),\r\n 'par0' : self._vecsort(self._getvector('par0')),\r\n 'frozen' : self._vecsort(self._getvector('frozen')),\r\n 'linear' : self._vecsort(self._getvector('linear')),\r\n 'values' : self._vecsort(self._getvector('value')),\r\n 'units' : self._vecsort(self._getvector('units')),\r\n }\r\n #---------------------------------------------------------------------------------------\r\n\r\n \r\n #---------------------------------------------------------------------------------------\r\n def _parameter_table(self):\r\n string = inspect.cleandoc(f\"\"\"\r\n Description: {self.description}\r\n Signature: ({', '.join(self.signature)})\r\n Constants: [{', '.join([entry['argkey'] for entry in self._constantsInfo])}]\r\n Parameter Table: \r\n \"\"\")\r\n string += '\\n'\r\n table = []\r\n table.append(['Name','Lower','Upper','Type','Frozen','Units','Description']) \r\n alignment = ['<','^','^','^','^','^','<']\r\n for n,paramname in enumerate(self._parameter_list(order='vector')): \r\n param_str = paramname\r\n lb_str = f'{np.atleast_1d(getattr(self,paramname).lb)[0]:5.3g}'\r\n ub_str = f'{np.atleast_1d(getattr(self,paramname).ub)[0]:5.3g}'\r\n linear_str = \"linear\" if np.all(getattr(self,paramname).linear) else \"nonlin\"\r\n frozen_str = \"Yes\" if np.all(getattr(self,paramname).frozen) else \"No\"\r\n units_str = str(getattr(self,paramname).units)\r\n desc_str = str(getattr(self,paramname).description)\r\n table.append([param_str,lb_str,ub_str,linear_str,frozen_str,units_str,desc_str])\r\n string += formatted_table(table,alignment)\r\n return string\r\n #---------------------------------------------------------------------------------------\r\n\r\n def __str__(self):\r\n return self._parameter_table()\r\n #def __repr__(self):\r\n # return self._parameter_table() \r\n\r\n#===================================================================================\r\n\r\n#==============================================================================\r\nclass Penalty():\r\n r\"\"\"Represents a penalty term of the objective function. \r\n\r\n Attributes\r\n ----------\r\n weight : ``Parameter`` instance \r\n Penalty weight parameter (a ``Parameter`` object instance without the\r\n ``linear`` and ``par0`` attributes). \r\n description : string \r\n Description of the penalty. \r\n selection : string \r\n Name of the selection functional for the penalty weight optimization. \r\n\r\n \"\"\"\r\n #--------------------------------------------------------------------------\r\n def __init__(self,penaltyfcn,selection,description=None,signature=None):\r\n r\"\"\"\r\n Construct a new penalty object. \r\n\r\n Parameters\r\n ----------\r\n penaltyfcn : callable \r\n Function that takes a set of parameters and returns a vector that\r\n will internally be squared and appended to the least-squares \r\n residual vector. The names of the arguments defined in the function\r\n signature must match the names of the parameter in the model used along\r\n the penalty. \r\n \r\n selection : string \r\n Selection functional for the outer optimization of the penalty weight. 
\r\n\r\n - ``'aic'`` - Akaike information criterion\r\n - ``'bic'`` - Bayesian information criterion\r\n - ``'aicc'`` - Corrected Akaike information criterion\r\n - ``'icc'`` - Informational complexity criterion \r\n\r\n description : string, optional \r\n Description of the penalty.\r\n \r\n signature : list of strings\r\n Signature of the ``penaltyfcn`` function to manually specify the names\r\n of the input arguments. For internal use (mostly).\r\n\r\n \"\"\" \r\n #-------------------------------------------------------------------------------\r\n def selectionfunctional(fitfcn,y,sigma,log10weight):\r\n # Penalty weight: log-scale -> linear-scale\r\n weight = 10**log10weight\r\n self._weight_value = weight\r\n # Run the fit\r\n fitresult = fitfcn(weight)\r\n\r\n if selection=='icc':\r\n # Get the fitted model\r\n yfit = fitresult.model\r\n\r\n # Get non-linear parameters covariance submatrix\r\n fitpars = fitresult.nonlin + np.finfo(float).eps\r\n covmat = fitresult.nonlinUncert.covmat + np.finfo(float).eps\r\n covmat = covmat/(fitpars[np.newaxis,:]*fitpars[:,np.newaxis])\r\n\r\n # Informational complexity criterion (ICC)\r\n if not np.all(covmat==0):\r\n icc = np.sum((y - yfit)**2/sigma**2) + np.sum(np.log(np.diag(covmat))) + np.linalg.slogdet(covmat)[1]\r\n else:\r\n icc = np.sum((y - yfit)**2/sigma**2)\r\n return icc \r\n\r\n elif selection=='aic':\r\n aic = fitresult.stats['aic']\r\n return aic\r\n\r\n elif selection=='aicc':\r\n aicc = fitresult.stats['aicc']\r\n return aicc\r\n\r\n elif selection=='bic':\r\n bic = fitresult.stats['bic']\r\n return bic\r\n #-------------------------------------------------------------------------------\r\n\r\n # Set the weighted penalty function\r\n self.penaltyfcn = lambda weight,*args: weight*penaltyfcn(*args)\r\n # Set the selection functional\r\n self.selectionfcn = selectionfunctional\r\n self.selection = selection\r\n # Prepare empty attributes\r\n self.description = description\r\n\r\n # Get the penalty signature\r\n if signature is None:\r\n self.signature = inspect.getfullargspec(penaltyfcn).args\r\n else: \r\n self.signature = signature\r\n\r\n # Create parameter object for the penalty weight\r\n newparam = Parameter(parent=self, idx=0, name='weight')\r\n \r\n # Add to the penalty object\r\n setattr(self,'weight',newparam)\r\n self.weight.lb = np.finfo(float).eps\r\n\r\n # Remove useless attributes\r\n delattr(self.weight,'par0')\r\n delattr(self.weight,'linear')\r\n #--------------------------------------------------------------------------\r\n\r\n #--------------------------------------------------------------------------\r\n def optimize(self,fitfcn,y,sigma):\r\n r\"\"\"\r\n Optimize the penalty weight. \r\n\r\n Parameters\r\n ----------\r\n fitfcn : callable\r\n Fit function taking a penalty weight value. Must return\r\n a :ref:`FitResult` object.\r\n y : array_like \r\n Dataset being fitted \r\n sigma : scalar \r\n Estimated noise level (standard deviation). \r\n \r\n Returns \r\n -------\r\n fit : :ref:`FitResult`\r\n Fit at the optimized penalty weight. 
\r\n weightopt : scalar \r\n Optimized penalty weight.\r\n \"\"\"\r\n if not self.weight.frozen:\r\n # Extract optimization range from model penalty\r\n searchrange = np.array([np.log10(self.weight.lb), np.log10(self.weight.ub)])\r\n searchrange[np.isinf(searchrange)] = 20\r\n # Construct the selection functional\r\n selectionFunctional = lambda log10weight: self.selectionfcn(fitfcn,y,sigma,log10weight)\r\n\r\n # Minimization of the selection functional\r\n log10optweight = fminbound(selectionFunctional,*searchrange, xtol=0.1)\r\n\r\n # Log-scale -> linear-scale\r\n optweight = 10**log10optweight\r\n else: \r\n optweight = self.weight.value\r\n self._weight_value = optweight\r\n\r\n # Update optimized value to object\r\n self.optweight = optweight\r\n\r\n # Get the fit result\r\n fitresult = fitfcn(optweight)\r\n\r\n return fitresult,optweight\r\n #--------------------------------------------------------------------------\r\n#==============================================================================\r\n\r\n#--------------------------------------------------------------------------\r\ndef _outerOptimization(fitfcn,penalty_objects,y,sigma):\r\n\r\n # If there are no penalties\r\n if len(penalty_objects)==0:\r\n fitfcn_ = lambda y: fitfcn(y,[None])\r\n\r\n # Otherwise, prepare to solve multiobjective problem \r\n elif len(penalty_objects)==3:\r\n thirdfcn = lambda y,*param: penalty_objects[2].optimize(lambda weight: fitfcn(y,[*param,weight]),y,sigma)[1]\r\n secondfcn = lambda y,*param: penalty_objects[1].optimize(lambda weight: fitfcn(y,[*param,weight,thirdfcn(y,*param,weight)]),y,sigma)[1]\r\n fitfcn_ = lambda y: penalty_objects[0].optimize(lambda weight: fitfcn(y,[weight,secondfcn(y,weight),thirdfcn(y,weight,secondfcn(y,weight))]),y,sigma)[0]\r\n\r\n elif len(penalty_objects)==2:\r\n secondfcn = lambda y,*param: penalty_objects[1].optimize(lambda weight: fitfcn(y,[*param,weight]),y,sigma)[1]\r\n fitfcn_ = lambda y: penalty_objects[0].optimize(lambda weight: fitfcn(y,[weight,secondfcn(y,weight)]),y,sigma)[0]\r\n\r\n elif len(penalty_objects)==1:\r\n fitfcn_ = lambda y: penalty_objects[0].optimize(lambda weight: fitfcn(y,[weight]),y,sigma)[0]\r\n else: \r\n raise RuntimeError('The fit() function can only handle up to three penalties.')\r\n\r\n return fitfcn_\r\n#--------------------------------------------------------------------------\r\n\r\n#--------------------------------------------------------------------------\r\ndef _print_fitresults(fitresult,model):\r\n \"\"\"Construct summary table of fit results to print\"\"\"\r\n\r\n # Start printout string\r\n string = ''\r\n # Get number of models in the fit\r\n modelfits = fitresult.model\r\n if not isinstance(modelfits,list):\r\n modelfits = [modelfits]\r\n Ndatasets = len(modelfits)\r\n\r\n # Construct table of goodness-of-fit statistics\r\n table = []\r\n table.append([f'Dataset','Noise level','Reduced 𝛘2','RMSD','AIC']) # Header\r\n alignment = ['^','^','^','^','^'] # Tab alignment\r\n stats = np.atleast_1d(fitresult.stats)\r\n noiselevels = np.atleast_1d(fitresult.noiselvl)\r\n for n in range(Ndatasets):\r\n noiselvl = noiselevels[n]\r\n chi2red = stats[n]['chi2red']\r\n rmsd = stats[n]['rmsd']\r\n aic = stats[n]['aic']\r\n noiselvl,chi2red,rmsd,aic = [f'{var:.3f}' if var<1e4 else f'{var:.3g}' for var in [noiselvl,chi2red,rmsd,aic]] \r\n table.append([f'#{1+n}',noiselvl,chi2red,rmsd,aic])\r\n # Add auto-formatted table string\r\n string += 'Goodness-of-fit: \\n'\r\n string += formatted_table(table,alignment) + '\\n'\r\n\r\n # Construct 
table of model parameter fits\r\n table = []\r\n table.append([f'Parameter','Value','95%-Confidence interval','Units','Description']) # Header\r\n alignment = ['<','<','<','^','<'] # Alignment\r\n for param in model._parameter_list('vector'):\r\n if len(np.atleast_1d(getattr(model,param).idx))==1:\r\n if np.any(getattr(model,param).frozen): \r\n # If parameter is frozen, print just the value\r\n value = getattr(model,param).value\r\n if hasattr(value,'__len__'): value = value[0]\r\n value = f'{value:.3f}' if value<1e4 else f'{value:.3g}'\r\n ci = '(frozen)'\r\n else:\r\n # If parameter is scalar, report values and CIs\r\n value = getattr(fitresult,param)\r\n ci_lower,ci_upper = getattr(fitresult,param+'Uncert').ci(95)\r\n value,ci_lower,ci_upper = [f'{var:.3f}' if var<1e4 else f'{var:.3g}' for var in [value,ci_lower,ci_upper]]\r\n ci = f'({ci_lower},{ci_upper})'\r\n else:\r\n # If parameter is vectorial, print just dots\r\n value = '...'\r\n ci = '(...,...)'\r\n units = str(getattr(model,param).units)\r\n description = str(getattr(model,param).description)\r\n table.append([f'{param}',value,ci,units,description])\r\n # Add auto-formatted table string\r\n string += 'Model parameters: \\n'\r\n string += formatted_table(table,alignment)\r\n return string\r\n#--------------------------------------------------------------------------\r\n\r\ndef insert_snlls_optionals_docstrings():\r\n # Get the documentation for the optional keyword arguments in snlls.py also used by fit()\r\n text = snlls.__doc__\r\n text = text.split('\\n\\n')\r\n # Exclude arguments already set by the outer function\r\n exclude = ['lb','ub','lbl','ubl','subsets','lin_frozen','nonlin_frozen','regparam','reg','regparamrange', 'extrapenalty']\r\n paragraphs = [s for s in text if not any(e in s for e in exclude)]\r\n # Concatenate the arguments\r\n snlls_keyargs_docs = ''\r\n for paragraph in paragraphs: \r\n # Only keep optional keyword arguments\r\n if 'optional' in paragraph:\r\n snlls_keyargs_docs += paragraph + '\\n' \r\n\r\n def decorator(func):\r\n func.__doc__ = func.__doc__.replace('snlls_keyargs_docstrings',snlls_keyargs_docs)\r\n return func\r\n return decorator\r\n\r\n#==============================================================================================\r\n@insert_snlls_optionals_docstrings()\r\ndef fit(model_, y, *constants, par0=None, penalties=None, bootstrap=0, noiselvl=None, mask=None, weights=None,\r\n regparam='aic',reg='auto',regparamrange=None,**kwargs):\r\n r\"\"\"\r\n Fit the model(s) to the dataset(s)\r\n\r\n Fit the input model to the data ``y`` via one of the three following approaches: \r\n \r\n - Non-linear least-squares \r\n - Regularized linear-least-squares \r\n - Separable non-linear least-squares \r\n\r\n The most appropriate solver is chosen automatically based on the model structure. \r\n\r\n Parameters\r\n ----------\r\n model : :ref:`Model`\r\n Model object. \r\n y : array_like \r\n Data to be fitted. \r\n par0 : array_like, optional \r\n Value at which to initialize the parameter at the start of a fit routine. \r\n Must be specified if not defined in the model. Otherwise, it overrides the definition in the model. 
\r\n snlls_keyargs_docstrings\r\n\r\n Returns\r\n -------\r\n :ref:`FitResult` with the following fields defined:\r\n <parameter_name> : :ref:`Parameter`\r\n Fitted value of the <parameter_name> model parameter.\r\n <parameter_name>Uncert : :ref:`UQResult`\r\n Uncertainty quantification of the <parameter_name> model parameter.\r\n param : ndarray\r\n Fitted parameter vector ordered according to the model parameter indices.\r\n paramUncert : :ref:`UQResult`\r\n Uncertainty quantification of the parameter vector ordered according to the model parameter indices.\r\n model : ndarray\r\n Fitted model response.\r\n modelUncert : :ref:`UQResult`\r\n Uncertainty quantification of the fitted model response. \r\n regparam : scalar\r\n Regularization parameter value used for the regularization of the linear parameters.\r\n plot : callable\r\n Function to display the results. It will display the fitted data.\r\n The function returns the figure object (``matplotlib.figure.Figure``)\r\n object as output, which can be modified. A vector for the x-axis and its label can\r\n be specified by calling ``FitResult.plot(axis=axis,xlabel='xlabel')``.\r\n stats : dict\r\n Goodness of fit statistical estimators\r\n\r\n * ``stats['chi2red']`` - Reduced \\chi^2 test\r\n * ``stats['r2']`` - R^2 test\r\n * ``stats['rmsd']`` - Root-mean squared deviation (RMSD)\r\n * ``stats['aic']`` - Akaike information criterion\r\n * ``stats['aicc']`` - Corrected Akaike information criterion\r\n * ``stats['bic']`` - Bayesian information criterion\r\n cost : float\r\n Value of the cost function at the solution.\r\n \"\"\"\r\n\r\n if not isinstance(model_,Model):\r\n raise TypeError('The input model must be a valid deerlab.Model object.')\r\n else:\r\n model = copy(model_)\r\n\r\n required = len(model._constantsInfo)\r\n if len(constants)!=required: \r\n raise SyntaxError(f'The input model requires {required} constant(s) to be specified. Specify them via fit(model,y,*constants).')\r\n elif len(constants)>0:\r\n constants = np.atleast_1d(constants)\r\n\r\n if model.Nlin==0:\r\n model.addlinear('scale',lb=-np.inf,ub=np.inf,description='Scaling factor')\r\n\r\n # Get boundaries and conditions for the linear and nonlinear parameters\r\n ubl,ub = model._split_linear(model._vecsort(model._getvector('ub')))\r\n lbl,lb = model._split_linear(model._vecsort(model._getvector('lb')))\r\n frozenl,frozen = model._split_linear(model._vecsort(model._getvector('frozen')))\r\n valuesl,values = model._split_linear(model._vecsort(model._getvector('value')))\r\n\r\n # Check the initial conditions and whether they are defined\r\n if par0 is None:\r\n _,par0 = model._split_linear(model._vecsort(model._getvector('par0')))\r\n if np.any(par0==None):\r\n raise RuntimeError(f\"It appears some start values (par0) have not been specified. 
Either specify them in the model definition or via the par0 keyword argument.\")\r\n\r\n linfrozen = np.full(model.Nlin,None)\r\n linfrozen[frozenl] = valuesl[frozenl]\r\n nonlinfrozen = np.full(model.Nnonlin,None)\r\n nonlinfrozen[frozen] = values[frozen]\r\n\r\n if len(linfrozen)==0: linfrozen = [1]\r\n\r\n if type(y) is not list: y = [y]\r\n ysplit = y.copy()\r\n y, _, weights, mask, ysubsets, noiselvl = parse_multidatasets(y, None, weights, noiselvl, precondition=False, masks=mask)\r\n sigmas = np.concatenate([np.full_like(yset,sigma) for sigma,yset in zip(noiselvl,ysplit)])\r\n\r\n\r\n if model.Nlin==0 and model.Nnonlin==0:\r\n raise AssertionError(f'The model has no parameters to fit.') \r\n\r\n # Get parameter indices in the order returned by the solver\r\n param_idx = [[]]*len(model._parameter_list('vector'))\r\n idxprev = 0\r\n for islinear in [False,True]:\r\n for n,param in enumerate(model._parameter_list('vector')):\r\n if np.all(getattr(model,param).linear == islinear):\r\n N = len(np.atleast_1d(getattr(model,param).idx))\r\n param_idx[n] = np.arange(idxprev,idxprev + N)\r\n idxprev += N \r\n\r\n # If there are penalties in the model\r\n if penalties is not None:\r\n if not hasattr(penalties, '__iter__'): \r\n penalties = [penalties]\r\n # Get the parameter names of the model\r\n modelparam = model._parameter_list('vector')\r\n penaltyfcns = []\r\n for penalty in penalties:\r\n # Determine the indices of the subset of parameters the model depends on\r\n subsets = [getattr(model,modelparam[np.where(np.asarray(modelparam)==param)[0][0]]).idx for param in penalty.signature]\r\n # Adapt the signature of penaltyfcn for snlls (default arguments bind the loop variables to avoid late binding)\r\n penaltyfcns.append(lambda pnonlin,plin,weight,penalty=penalty,subsets=subsets: penalty.penaltyfcn(weight,*[np.concatenate([pnonlin,plin])[subset] for subset in subsets]))\r\n\r\n # Prepare the penalties to input to snlls\r\n extrapenalties = lambda weights: [lambda nonlin,lin,penaltyfcn=penaltyfcn,weight=weight: penaltyfcn(nonlin,lin,weight) for penaltyfcn,weight in zip(penaltyfcns,weights)]\r\n else: \r\n # If there are no penalties in the model\r\n penalties = []\r\n extrapenalties = lambda *_: None\r\n\r\n # Prepare the separable non-linear least-squares solver\r\n Amodel_fcn = lambda param: model.nonlinmodel(*constants,*param)\r\n fitfcn = lambda y,penweights: snlls(y, Amodel_fcn, par0, lb=lb, ub=ub, lbl=lbl, ubl=ubl, mask=mask, weights=weights, \r\n subsets=ysubsets, lin_frozen=linfrozen, nonlin_frozen=nonlinfrozen,\r\n regparam=regparam, reg=reg, regparamrange=regparamrange, noiselvl=noiselvl,\r\n extrapenalty=extrapenalties(penweights), **kwargs) \r\n\r\n # Prepare outer optimization of the penalty weights, if necessary\r\n fitfcn = _outerOptimization(fitfcn,penalties,y,sigmas)\r\n\r\n # Run the fitting algorithm \r\n fitresults = fitfcn(y)\r\n\r\n # If requested, perform a bootstrap analysis\r\n if bootstrap>0: \r\n def bootstrap_fcn(ysim): \r\n fit = fitfcn(np.concatenate(ysim))\r\n if not isinstance(fit.model,list): fit.model = [fit.model]\r\n return (fit.param,*fit.model)\r\n # Bootstrapped uncertainty quantification\r\n param_uq = bootstrap_analysis(bootstrap_fcn,ysplit,fitresults.model,samples=bootstrap)\r\n # Include information on the boundaries for better uncertainty estimates\r\n paramlb = model._vecsort(model._getvector('lb'))[np.concatenate(param_idx)] \r\n paramub = model._vecsort(model._getvector('ub'))[np.concatenate(param_idx)] \r\n fitresults.paramUncert = UQResult('bootstrap',data=param_uq[0].samples,lb=paramlb,ub=paramub)\r\n fitresults.param = fitresults.paramUncert.median\r\n # Get the uncertainty 
estimates for the model response\r\n fitresults.model = [param_uq[n].median for n in range(1,len(param_uq))]\r\n fitresults.modelUncert = [param_uq[n] for n in range(1,len(param_uq))]\r\n if len(fitresults.model)==1: \r\n fitresults.model = fitresults.model[0]\r\n fitresults.modelUncert = fitresults.modelUncert[0]\r\n # Get some basic information on the parameter vector\r\n keys = model._parameter_list(order='vector')\r\n \r\n # Dictionary of parameter names and fitted values\r\n FitResult_param = {key : fitvalue if len(fitvalue)>1 else fitvalue[0] for key,fitvalue in zip(keys,[fitresults.param[idx] for idx in param_idx])}\r\n # Dictionary of parameter names and fit uncertainties\r\n FitResult_paramuq = {f'{key}Uncert': model._getparamuq(fitresults.paramUncert,idx) for key,idx in zip(keys,param_idx)}\r\n # Dictionary of other fit quantities of interest\r\n FitResult_dict = {key: getattr(fitresults,key) for key in ['param','paramUncert','model','modelUncert','cost','plot','residuals','stats','regparam']}\r\n\r\n\r\n _paramlist = model._parameter_list('vector')\r\n def propagate(model,*constants,lb=None,ub=None):\r\n # ----------------------------------------------------------------------------\r\n \"\"\"\r\n Propagate the uncertainty in the fit results to a model's response.\r\n\r\n Parameters\r\n ----------\r\n\r\n model : :ref:`Model` or callable\r\n Model object or callable function to be evaluated. All the parameters in the model or in the callable definition\r\n must match their corresponding parameter names in the ``FitResult`` object. \r\n constants : array_like \r\n Model constants. \r\n lb : array_like, optional \r\n Lower bounds of the model response.\r\n ub : array_like, optional \r\n Upper bounds of the model response. \r\n\r\n Returns\r\n -------\r\n\r\n responseUncert : :ref:`UQResult`\r\n Uncertainty quantification of the model's response.\r\n \"\"\"\r\n # Get the parameter names of the input model\r\n if isinstance(model,Model):\r\n modelparam = model._parameter_list('vector')\r\n elif callable(model):\r\n modelparam = inspect.getfullargspec(model).args\r\n else: \r\n raise TypeError('The input must be a deerlab.Model object or a callable.')\r\n\r\n # Check that all parameters are in the fit object\r\n for param in modelparam:\r\n if not param in FitResult_param: \r\n raise KeyError(f'The fit object does not contain the {param} parameter.')\r\n # Determine the indices of the subset of parameters the model depends on\r\n subset = [param_idx[np.where(np.asarray(_paramlist)==param)[0][0]] for param in modelparam]\r\n # Propagate the uncertainty from that subset to the model\r\n modeluq = fitresults.paramUncert.propagate(lambda param: model(*constants,*[param[s] for s in subset]),lb,ub)\r\n return modeluq\r\n # ----------------------------------------------------------------------------\r\n\r\n def evaluate(model,*constants):\r\n # ----------------------------------------------------------------------------\r\n \"\"\"\r\n Evaluate a model at the fitted parameter values. \r\n\r\n Parameters\r\n ----------\r\n\r\n model : :ref:`Model` or callable\r\n Model object or callable function to be evaluated. All the parameters in the model or in the callable definition\r\n must match their corresponding parameter names in the ``FitResult`` object. \r\n constants : array_like \r\n Any model constants present required by the model. \r\n \r\n Returns\r\n -------\r\n\r\n response : array_like \r\n Model response at the fitted parameter values. 
\r\n \"\"\"\r\n if isinstance(model,Model):\r\n modelparam = model._parameter_list('vector')\r\n elif callable(model):\r\n modelparam = inspect.getfullargspec(model).args\r\n else: \r\n raise TypeError('The input must be a deerlab.Model object or a callable.')\r\n\r\n # Check that all parameters are in the fit object\r\n for param in modelparam:\r\n if not param in FitResult_param: \r\n raise KeyError(f'The fit object does not contain the {param} parameter.')\r\n # Determine the indices of the subset of parameters the model depends on\r\n parameters = {param: FitResult_param[param] for param in modelparam}\r\n # Evaluate the input model\r\n response = model(*constants,**parameters)\r\n return response\r\n # ----------------------------------------------------------------------------\r\n\r\n if len(noiselvl)==1: \r\n noiselvl = noiselvl[0]\r\n\r\n penweights = [penalty._weight_value for penalty in penalties]\r\n\r\n # Generate FitResult object from all the dictionaries\r\n fitresult = FitResult({**FitResult_param,**FitResult_paramuq, **FitResult_dict,'penweights':penweights,'noiselvl':noiselvl, 'propagate': propagate, 'evaluate': evaluate}) \r\n fitresult._summary = _print_fitresults(fitresult,model)\r\n\r\n return fitresult\r\n#==============================================================================================\r\n\r\ndef _importparameter(parameter):\r\n return {\r\n 'lb' : parameter.lb,\r\n 'ub' : parameter.ub,\r\n 'par0' : parameter.par0,\r\n 'description' : parameter.description,\r\n 'units' : parameter.units,\r\n 'frozen' : parameter.frozen,\r\n 'value' : parameter.value,\r\n }\r\n\r\ndef _aresame(obj1,obj2):\r\n a = obj1.__dict__\r\n a = {key:val for key, val in a.items() if key not in ['_parent','idx']}\r\n b = obj2.__dict__\r\n b = {key:val for key, val in b.items() if key not in ['_parent','idx']}\r\n try:\r\n np.testing.assert_equal(a,b)\r\n return True\r\n except Exception:\r\n return False\r\n# ==============================================================================\r\ndef link(model,**links):\r\n \"\"\"\r\n Create equality relationships between parameters \r\n\r\n Parameters\r\n ----------\r\n model : :ref:`Model`\r\n Model object. \r\n links : keyword-argument pairs \r\n Keyword-argument pairs, where the arguments must be lists of model parameter names. \r\n The corresponding model parameter will be assigned to new parameter whose name is given \r\n by the keyword name. For example:: \r\n\r\n newmodel = link(model,parC = ['parA','parB'])\r\n\r\n will return a new model where the values of ``parA`` and ``parB`` will be given by the\r\n new model parameter ``parC``. \r\n\r\n Returns\r\n -------\r\n newmodel : :ref:`Model`\r\n New model object without the linked parameter and with the newly defined parameters. 
\r\n \"\"\"\r\n def _linkparameter(model,parameters,newname):\r\n # --------------------------------------------------------------------- \r\n # Get a list of parameter names in the model\r\n model_parameters = model._parameter_list(order='vector')\r\n\r\n link_parameters,link_indices,link_param_idx = [],[],[]\r\n for param in parameters:\r\n if not isinstance(param,Parameter) and isinstance(param,str):\r\n param = getattr(model,param)\r\n # Make list of parameter objects\r\n link_parameters.append(param)\r\n # Make list of parameter indices\r\n link_indices.append(param.idx)\r\n\r\n # Get the names of the parameters to be linked\r\n link_names = []\r\n for param in parameters:\r\n if isinstance(param,Parameter):\r\n for mparam in model_parameters:\r\n if _aresame(param,getattr(model,mparam)):\r\n link_names.append(mparam)\r\n break\r\n else: \r\n link_names.append(param)\r\n\r\n # Remove the first from the list as it will be kept\r\n linked_name = link_names.pop(0)\r\n \r\n for n,name in enumerate(model_parameters):\r\n if name==linked_name: link_param_idx = n\r\n\r\n \r\n\r\n # Get the vector index of the parameter to be linked to\r\n link_indices = np.atleast_1d(link_indices[0])\r\n\r\n nnew = 0\r\n # Initialize the maps linked->unlinked\r\n mapping = np.zeros(model.Nnonlin,dtype=int)\r\n mapping_linear = np.zeros(model.Nlin,dtype=int)\r\n\r\n # Get the indices of the unlinked parameters in the maps\r\n unlinked_linear_idx = np.full(len(model_parameters),None)\r\n unlinked_nonlinear_idx = np.full(len(model_parameters),None)\r\n linked_linear_idx = np.full(len(model_parameters),None)\r\n linked_nonlinear_idx = np.full(len(model_parameters),None)\r\n q = 0\r\n nnew = 0\r\n for n,param in enumerate(model_parameters):\r\n if np.all(getattr(model,param).linear):\r\n m = len(np.atleast_1d(getattr(model,param).idx)) \r\n unlinked_linear_idx[n]= np.arange(q,q+m)\r\n q += m\r\n if param not in link_names:\r\n linked_linear_idx[n]= np.arange(nnew,nnew+m)\r\n else:\r\n unlinked_nonlinear_idx[n] = np.array(n)\r\n if param not in link_names: \r\n linked_nonlinear_idx[n] = np.array(nnew)\r\n m = 1\r\n if param not in link_names: \r\n nnew += m\r\n Nnl = model.Nnonlin\r\n \r\n # Loop over all parameters in the model\r\n for n,param in enumerate(model_parameters):\r\n # If the parameter is to be linked...\r\n if param in link_names:\r\n # Update the number of parameters in the model\r\n Nremoved = len(np.atleast_1d(getattr(model,param).idx))\r\n model.Nparam -= Nremoved\r\n if np.all(getattr(model,param).linear):\r\n model.Nlin -= Nremoved\r\n # Update the parameter vector map\r\n mapping_linear[unlinked_linear_idx[n]] = link_indices-Nnl \r\n else: \r\n model.Nnonlin -= Nremoved\r\n # Update the parameter vector map\r\n mapping[unlinked_nonlinear_idx[n]] = link_indices\r\n # Delete the linked parameter from the model\r\n delattr(model,param)\r\n\r\n # Otherwise if the parameter is not linked...\r\n else:\r\n # Update the index of the parameter in the new vector \r\n if not np.all(getattr(model,param).linear):\r\n getattr(model,param).idx = linked_nonlinear_idx[n]\r\n mapping[unlinked_nonlinear_idx[n]] = linked_nonlinear_idx[n]\r\n else: \r\n getattr(model,param).idx = linked_linear_idx[n]\r\n mapping_linear[unlinked_linear_idx[n]] = linked_linear_idx[n]-Nnl\r\n\r\n # Delete the old copy with the old name\r\n paramobj = getattr(model,model_parameters[link_param_idx])\r\n delattr(model,model_parameters[link_param_idx])\r\n # Create a copy of the linked parameter with the new name\r\n 
setattr(model,newname,paramobj)\r\n\r\n # Monkey-patch the evaluation function \r\n nonlinfcn = model.nonlinmodel\r\n linear_reduce_idx = [np.where(mapping_linear==n)[0].tolist() for n in np.unique(mapping_linear) ]\r\n Nconstants = len(model._constantsInfo)\r\n # ---------------------------------------------------------------------\r\n def linked_model_with_constants(*inputargs):\r\n # Redistribute the input parameter vector according to the mapping vector\r\n constants = inputargs[:Nconstants]\r\n θ = inputargs[Nconstants:]\r\n θ = np.atleast_1d(θ)[mapping]\r\n args = list(θ)\r\n if model._constantsInfo is not None:\r\n for info,constant in zip(model._constantsInfo,constants):\r\n args.insert(info['argidx'],constant) \r\n A = nonlinfcn(*args)\r\n if len(A.shape)<2: A = np.expand_dims(A,1)\r\n Amapped = np.vstack([np.sum(np.atleast_2d(A[:,idx]),axis=1) for idx in linear_reduce_idx]).T\r\n return Amapped\r\n # ---------------------------------------------------------------------\r\n model.nonlinmodel = linked_model_with_constants\r\n\r\n # Return the updated model with the linked parameters\r\n return model\r\n # ---------------------------------------------------------------------\r\n\r\n if not isinstance(model,Model):\r\n raise TypeError('The first argument must be a Model object.')\r\n newmodel = deepcopy(model)\r\n # Perform the linking, one by one\r\n for link_newname in links: \r\n to_link = [getattr(newmodel,parname) for parname in links[link_newname]]\r\n newmodel = _linkparameter(newmodel,to_link,link_newname)\r\n # Update the new model signature\r\n for key in links.keys():\r\n newmodel.signature = [key if arg==links[key][0] else arg for arg in newmodel.signature]\r\n for arg in links[key]:\r\n if arg in newmodel.signature: \r\n newmodel.signature.remove(arg)\r\n return newmodel\r\n#==============================================================================================\r\n\r\n# ---------------------------------------------------------------------\r\ndef _unique_ordered(vec):\r\n uniques = []\r\n for v in vec: \r\n if v not in uniques:\r\n uniques.append(v)\r\n return uniques\r\n# ---------------------------------------------------------------------\r\n\r\n\r\n#==============================================================================================\r\ndef _combinemodels(mode,*inputmodels,addweights=False): \r\n\r\n # Initialize empty containers\r\n subsets_nonlin,arguments,arelinear = [],[],[]\r\n nprev = 0\r\n\r\n if len(inputmodels)==1:\r\n return inputmodels[0]\r\n\r\n # Make deep-copies of the models to avoid modifying them\r\n models = [deepcopy(model) for model in inputmodels]\r\n\r\n if addweights:\r\n\r\n for n,(model,nonlinfcn) in enumerate(zip(models,[model.nonlinmodel for model in models])):\r\n constants = [constant['argkey'] for constant in model._constantsInfo]\r\n signature = []\r\n for param in model.signature: \r\n if param in constants:\r\n signature.append(param) \r\n else:\r\n if not np.any(getattr(model,param).linear):\r\n signature.append(param) \r\n \r\n signature.append('weight')\r\n def make_weighted_comb(nonlinfcn):\r\n def weighted_comb(*inputargs):\r\n weight = inputargs[-1]\r\n param = inputargs[:-1]\r\n return weight*nonlinfcn(*param)\r\n return weighted_comb\r\n constants = [entry['argkey'] for entry in model._constantsInfo]\r\n weightedModel = Model(make_weighted_comb(nonlinfcn),constants=constants,signature=signature)\r\n for name in model._parameter_list(order='vector'):\r\n if np.any(getattr(model,name).linear):\r\n 
weightedModel.addlinear(name,vec=len(np.atleast_1d(getattr(model,name).idx))) \r\n getattr(weightedModel,name).set(**_importparameter(getattr(model,name)))\r\n getattr(weightedModel,'weight').set(lb=0,par0=1,description='Weighting factor')\r\n models[n] = deepcopy(weightedModel)\r\n\r\n # Loop over all models to be combined\r\n for n,model in enumerate(models): \r\n \r\n # If one of the models has linear parameters, but not the others\r\n # add a dummy unity linear parameter \r\n if model.Nlin==0:\r\n model.addlinear('scale',par0=1,lb=0, description='Scaling factor')\r\n\r\n # Determine the subset of parameters for the current model\r\n subset = np.arange(nprev,nprev+model.Nnonlin,1)\r\n nprev += model.Nnonlin\r\n # From that subset, determine the non-linear subset\r\n subset_nonlin = subset[np.arange(model.Nnonlin)]\r\n subsets_nonlin.append(subset_nonlin)\r\n\r\n # Determine which parameters are linear\r\n arelinear = np.concatenate([arelinear,model._vecsort(model._getvector('linear'))])\r\n\r\n newarguments = model._parameter_list(order='vector') \r\n # If there is more than one model, append a string to identify the origin\r\n if len(models)>1: \r\n newarguments = [arg+f'_{n+1}' for arg in newarguments] \r\n\r\n oldargs = models[n]._parameter_list(order='vector')\r\n i = 0\r\n for oldkey,newkey in zip(oldargs,newarguments): \r\n if isinstance(getattr(model,oldkey).idx,np.ndarray):\r\n newarguments = np.insert(newarguments,i*np.ones(len(np.atleast_1d(getattr(model,oldkey).idx))-1,dtype=int),newkey).tolist()\r\n i += len(np.atleast_1d(getattr(model,oldkey).idx))\r\n\r\n # Add the submodel arguments to the combined model signature\r\n arguments += newarguments\r\n\r\n # Preparation of the combined model signature \r\n arelinear = np.asarray(arelinear).astype(bool)\r\n arguments = np.array(arguments)\r\n lin_params = arguments[arelinear]\r\n nonlin_params = arguments[~arelinear]\r\n\r\n # Account for the constant arguments\r\n constants = []\r\n const_subsets = []\r\n for n,model in enumerate(models):\r\n subset = []\r\n for info in model._constantsInfo:\r\n constants.append(f\"{info['argkey']}_{n+1}\")\r\n subset.append(len(constants)-1)\r\n const_subsets.append(subset) \r\n signature = np.insert(nonlin_params,0,constants).tolist()\r\n Nconst = len(constants)\r\n nonlinfcns = [model.nonlinmodel for model in models]\r\n Nlins = [model.Nlin for model in models]\r\n ysizes = [[]]*len(models)\r\n\r\n #---------------------------------------------------------------------\r\n def _combined_nonlinmodel(*inputargs):\r\n \"\"\"Evaluates the nonlinear functions of the submodels and \r\n concatenates them into a single design matrix\"\"\"\r\n\r\n nonlocal ysizes\r\n\r\n constants = inputargs[:Nconst]\r\n param = inputargs[Nconst:]\r\n\r\n param = np.atleast_1d(param)\r\n constants = np.atleast_2d(constants)\r\n # Loop over the submodels in the model\r\n Amatrices = []\r\n for n,nonlinfcn in enumerate(nonlinfcns):\r\n # Evaluate the submodel\r\n Amatrix = np.atleast_2d(nonlinfcn(*constants[const_subsets[n],:],*param[subsets_nonlin[n]]))\r\n if np.shape(Amatrix)[1]!=Nlins[n]: Amatrix = Amatrix.T\r\n Amatrices.append(Amatrix)\r\n if mode=='merge':\r\n ysizes = [A.shape[0] for A in Amatrices]\r\n Anonlin_full = block_diag(Amatrices).toarray()\r\n if not any(arelinear):\r\n Anonlin_full = np.sum(Anonlin_full,1)\r\n\r\n elif mode=='lincombine':\r\n Anonlin_full = np.hstack(Amatrices)\r\n\r\n return Anonlin_full\r\n #---------------------------------------------------------------------\r\n \r\n 
#---------------------------------------------------------------------\r\n def _split_output(y,*inputargs):\r\n nonlocal ysizes\r\n nprev = 0\r\n ysubsets = []\r\n for x in ysizes:\r\n ysubsets.append(np.arange(nprev,nprev+x))\r\n nprev = nprev+x\r\n return [y[ysubsets[n]] for n in range(len(ysizes))]\r\n #---------------------------------------------------------------------\r\n\r\n # Create the model object\r\n combinedModel = Model(_combined_nonlinmodel,constants=constants,signature=signature)\r\n\r\n # Add parent models \r\n combinedModel.parents = models\r\n\r\n if mode=='merge':\r\n # Add post-evaluation function for splitting of the call outputs\r\n setattr(combinedModel,'_posteval_fcn',_split_output) \r\n\r\n # Add the linear parameters from the subset models \r\n lin_param_set = []\r\n for param in _unique_ordered(lin_params):\r\n lin_param_set.append({'name':param, 'vec':np.sum(lin_params==param)})\r\n\r\n for lparam in lin_param_set:\r\n combinedModel.addlinear(lparam['name'], vec=lparam['vec'])\r\n\r\n parameters = np.concatenate([arguments,lin_params])\r\n # Import all parameter information from the subset models\r\n for name,param in zip(combinedModel._parameter_list(order='vector'),parameters):\r\n if '_' in name:\r\n param = name.rsplit('_',1)[0]\r\n n = name.rsplit('_',1)[-1]\r\n n = int(n)-1\r\n else:\r\n param,n = name,0\r\n getattr(combinedModel,name).set(**_importparameter(getattr(models[n],param)))\r\n\r\n # Return the new combined model object\r\n return combinedModel \r\n#==============================================================================================\r\n\r\n#==============================================================================================\r\ndef merge(*inputmodels,addweights=False):\r\n \"\"\"\r\n Create a multi-response model from multiple individual models. \r\n\r\n Parameters\r\n ----------\r\n inputmodels : :ref:`Model` objects\r\n Model objects to be combined. If one of the models has no linear parameters, a linear \r\n scaling factor parameter will be added. The names of the ``N``-th input model's parameters will be \r\n given the suffix ``_N`` in the new model. Example:: \r\n\r\n newmodel = merge(model1,model2)\r\n newmodel.parA_1 # Originally parA from model1\r\n newmodel.parA_2 # Originally parA from model2\r\n\r\n\r\n addweights : boolean, optional \r\n If true, the function will add a non-linear weight parameter for each model response\r\n even if the individual models have linear parameters. \r\n\r\n Returns\r\n -------\r\n newmodel : :ref:`Model`\r\n New model object taking the combined parameter set and returning a list of model responses\r\n corresponding to each of the input models. \r\n \"\"\"\r\n return _combinemodels('merge',*inputmodels,addweights=addweights)\r\n#==============================================================================================\r\n\r\n#==============================================================================================\r\ndef lincombine(*inputmodels,addweights=False):\r\n \"\"\"\r\n Create a model whose response is a linear combination of multiple individual model responses. \r\n\r\n Parameters\r\n ----------\r\n inputmodels : :ref:`Model` objects\r\n Model objects whose linear responses are to be linearly combined. If one of the models \r\n has no linear parameters, a linear scaling factor parameter will be added. The names \r\n of the ``N``-th input model's parameters will be given the suffix ``_N`` in the new model. 
Example:: \r\n\r\n newmodel = lincombine(model1,model2)\r\n newmodel.parA_1 # Originally parA from model1\r\n newmodel.parA_2 # Originally parA from model2\r\n\r\n addweights : boolean, optional \r\n If true, the function will add a non-linear weight parameter for each model response\r\n even if the individual models have linear parameters. \r\n\r\n Returns\r\n -------\r\n newmodel : :ref:`Model`\r\n New model object taking the combined parameter set and returning a response that is a linear\r\n combination of the input models.\r\n \"\"\"\r\n return _combinemodels('lincombine',*inputmodels,addweights=addweights)\r\n#==============================================================================================\r\n\r\n\r\n#==============================================================================================\r\ndef relate(model,**functions):\r\n \"\"\"\r\n Create functional relationships between model parameters. \r\n\r\n Parameters\r\n ----------\r\n model : :ref:`Model`\r\n Model object. \r\n functions : keyword-callable pairs \r\n Functions describing the relationship between parameters. The keyword represents the parameter\r\n which will be functionalized. The keyword argument must be a callable function taking a number \r\n of model parameters (names must match any of the model parameter names) as input and returning\r\n the value to be assigned to the functionalized parameter. For example::\r\n\r\n newmodel = relate(model, parA = lambda parB: 2*parB)\r\n\r\n will create a new model ``newmodel`` based on ``model`` where the parameter ``parA`` is now \r\n given by twice the value of parameter ``parB``. The model ``newmodel`` will no longer have ``parA`` \r\n as a model parameter and will have one parameter fewer than ``model``. \r\n Multiple parameters can be functionalized by specifying multiple keyword-callable pairs.\r\n\r\n Returns\r\n -------\r\n newmodel : :ref:`Model`\r\n New model object without the functionalized parameters. 
\r\n \"\"\"\r\n def _relate(model,function,dependent_name):\r\n # --------------------------------------------------------------------- \r\n \r\n # Get a list of parameter names in the model\r\n model_parameters =np.array(model._parameter_list(order='vector'))\r\n\r\n # Get the index of the parameter which will be made dependent\r\n dependent_idx = np.where(model_parameters==dependent_name)\r\n\r\n # Get the names and indices of the parameters taken by the dependent's function\r\n arguments_names = inspect.getfullargspec(function).args\r\n\r\n if dependent_name not in model_parameters:\r\n raise KeyError(f\"The assigned parameter '{dependent_name}' is not a parameter of the input model.\")\r\n\r\n if getattr(model,dependent_name).linear:\r\n raise TypeError(f\"Linear parameters cannot be used.\")\r\n\r\n for arg in arguments_names:\r\n if arg not in model_parameters:\r\n raise KeyError(f\"The function argument '{arg}' is not a parameter of the input model.\")\r\n if getattr(model,arg).linear:\r\n raise TypeError(f\"Linear parameters cannot be used.\")\r\n\r\n param_idx = 0\r\n # Loop over all parameters in the model\r\n for param in model_parameters:\r\n Nidx = len(np.atleast_1d(getattr(model,param).idx))\r\n # Update the index of the parameter in the new vector \r\n if not np.all(getattr(model,param).linear):\r\n getattr(model,param).idx = np.array(param_idx) \r\n else: \r\n getattr(model,param).idx = np.arange(param_idx,param_idx+Nidx)\r\n\r\n if param != dependent_name:\r\n param_idx += Nidx\r\n\r\n # If the parameter is to be linked...\r\n Nremove = len(np.atleast_1d(getattr(model,dependent_name).idx))\r\n # Update the number of parameters in the model\r\n model.Nparam -= Nremove\r\n if np.all(getattr(model,dependent_name).linear):\r\n model.Nlin -= Nremove\r\n else: \r\n model.Nnonlin -= Nremove\r\n # Delete the linked parameter from the model\r\n delattr(model,dependent_name)\r\n\r\n # Monkey-patch the evaluation function \r\n nonlinfcn = model.nonlinmodel\r\n Nconstants = len(model._constantsInfo)\r\n nonlinparams = np.array([param for param in model._parameter_list('vector') if not np.all(getattr(model,param).linear)])\r\n arguments_idx = np.concatenate([np.where(nonlinparams==name)[0] for name in arguments_names])\r\n dependent_idx = dependent_idx[0]\r\n # ---------------------------------------------------------------------\r\n def dependency_model_with_constants(*inputargs):\r\n # Redistribute the input parameter vector according to the mapping vector\r\n constants = inputargs[:Nconstants]\r\n θ = np.atleast_1d(inputargs[Nconstants:]).astype(float)\r\n θ = np.insert(θ,dependent_idx,function(*θ[arguments_idx])) \r\n args = list(θ)\r\n if model._constantsInfo is not None:\r\n for info,constant in zip(model._constantsInfo,constants):\r\n args.insert(info['argidx'],constant) \r\n A = nonlinfcn(*args)\r\n return A\r\n # ---------------------------------------------------------------------\r\n model.nonlinmodel = dependency_model_with_constants\r\n\r\n # Return the updated model with the linked parameters\r\n return model\r\n # ---------------------------------------------------------------------\r\n\r\n if not isinstance(model,Model):\r\n raise TypeError('The first argument must be a deerlab.Model object.')\r\n newmodel = deepcopy(model)\r\n\r\n # Get the dependent's names and their function arguments\r\n dependents = [dependent for dependent in functions]\r\n arguments = [inspect.getfullargspec(functions[dependent]).args for dependent in dependents]\r\n\r\n # Update the new model 
signature\r\n for arg in [item for sublist in arguments for item in sublist]:\r\n if arg in newmodel.signature: \r\n newmodel.signature.remove(arg)\r\n\r\n # Check and correct the order of functionalization\r\n maxtrials = 2*len(dependents)\r\n # Run for a maximum number of trials\r\n for _ in range(maxtrials):\r\n # Loop over all dependent variables... \r\n for n,dependent in enumerate(dependents):\r\n # ...and check whether there is a conflict with the other arguments\r\n for m in range(n,len(arguments)):\r\n # If a dependent is to be used as an argument later, there is a conflict\r\n conflict = dependent in arguments[m]\r\n if conflict: break\r\n if conflict: break\r\n else: \r\n # If there are no conflicts, proceed with the functionalization\r\n break\r\n # If there is a conflict, swap the order of the dependents and check again\r\n dependents[n],dependents[m] = dependents[m],dependents[n] \r\n arguments[n],arguments[m] = arguments[m],arguments[n] \r\n else: \r\n # If no solution could be found (due to cyclic relations), raise an error\r\n raise RuntimeError('There are cyclic relationships in the parameter definitions that could not be resolved.')\r\n\r\n # Loop over all parameter functionalizations...\r\n for dependent in dependents: \r\n # ...and functionalize them one by one\r\n newmodel = _relate(newmodel,functions[dependent],dependent)\r\n\r\n return newmodel\r\n#==============================================================================================\r\n" ]
[ [ "numpy.diag", "numpy.expand_dims", "numpy.asarray", "numpy.squeeze", "numpy.concatenate", "numpy.all", "numpy.any", "numpy.where", "numpy.testing.assert_equal", "numpy.hstack", "numpy.unique", "numpy.arange", "numpy.linalg.slogdet", "numpy.full", "numpy.atleast_1d", "numpy.finfo", "numpy.insert", "scipy.sparse.construct.block_diag", "numpy.zeros", "numpy.full_like", "numpy.atleast_2d", "numpy.log10", "scipy.optimize.fminbound", "numpy.array", "numpy.sum", "numpy.shape", "numpy.isinf" ] ]
loosolab/Datenanalyse-2021
[ "2a94f6153a504bd6f1ee205eeeab279b20fb847d" ]
[ "wp3/DefiningTF.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 20 19:51:13 2022\n\n@author: Moritz Hobein\n\"\"\"\nimport pandas as pd\nimport argparse\n\n#parser for using the tool via command-line interface\ndef cliParser():\n parser = argparse.ArgumentParser(description='Script to extract the transcription factors that define a cluster (by z score of TOBIAS footprinting score')\n parser.add_argument('File', nargs=1, help='input file, tsv of binding scores')\n parser.add_argument('-o', '--outputName', dest='Custom_filename', default=\"importantTFsPerCluster.tsv\", help='Sets a custom name for saving the output files')\n parser.add_argument('-q', '--quantile', dest='Quantile', default=.95, help='Sets quantile of what the defining TFs are. Default is .95 (give out the top 5% of TFs by z score)')\n args = parser.parse_args()\n \n return args.File, args.Quantile, args.Custom_filename\n\ndef getDefiningTF(inputFile, quantile, outname):\n \n #counting the header lines to ignore them in data extraction later\n with open(inputFile, \"r\") as file:\n \n headerCount = 0\n \n for line in file:\n \n if line.startswith(\"#\") or line.startswith(\"!\"):\n pass\n elif line.startswith(\"TF\"):\n break\n headerCount += 1\n \n #reading the input file whole ignoring the perviously established header\n df = pd.read_csv(inputFile, sep=\"\\t\", header = headerCount)\n df.set_index('TF', inplace=True)\n\n clusters = {}\n \n #extracting the top TFs by z score according to the set quantile\n quants = df.quantile(quantile)\n \n for col in df:\n clusters[col] = []\n\n \n for col in df:\n for index in df.index:\n if df.at[index, col] >= quants[col]:\n clusters[col].append(index)\n \n #creating the output\n important_TFs = pd.DataFrame(clusters)\n \n important_TFs.to_csv(outname, sep=\"\\t\")\n \ndef main():\n \n File, quantile, outname = cliParser()\n inputFile = File[0]\n\n getDefiningTF(inputFile, quantile, outname)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
maslankam/Photometric-Stereo-Assistent
[ "e3af46c8a984df10f37747b77630cdb5125e0b7f" ]
[ "Include/project/light_model.py" ]
[ "from Include.project.segment import Segment\nfrom Include.project.image_reader import ImageReader\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\nfrom mpl_toolkits.mplot3d import Axes3D\nimport shutil\n\nimport os, cv2, csv, math\n\n\nclass LightModel(Segment):\n \"\"\"LightModel segment reponsible for light sources procesing\"\"\"\n\n def __init__(self, project):\n self.content = {'mask': None, 'images': None, 'lights': None}\n self.project = project\n self.state = {'data': False, 'save': False}\n\n if 'lights.csv' in os.listdir(project.directories['lights']):\n self.state['save'] = True\n self._read_lights()\n\n def __str__(self):\n return 'Lights:'\n\n def __del__(self):\n pass\n\n def import_from(self, path):\n \"\"\"Importing table of lights from exterior path\"\"\"\n self._read_lights(path)\n\n def save(self):\n \"\"\"Saving table of lights in project directory\"\"\"\n if self.content['lights'] is None:\n print(\"Nothing to save\")\n return\n\n if 'lights.csv' in os.listdir(self.project.directories['lights']):\n os.remove(str(self.project.directories['lights']) + '/lights.csv')\n\n _path = Path(str(self.project.directories['lights']) + '/lights.csv')\n\n with open(_path, 'a', newline='') as _file:\n for light in self.content['lights']:\n writer = csv.writer(_file, delimiter=',')\n data = [[str(light[0]), str(light[1]), str(light[2])]]\n writer.writerows(data)\n\n self.state['save'] = True\n\n def show(self, representation = 'both'):\n \"\"\"\"Showing representation of light model\"\"\"\n if self.content['lights'] is None:\n print('Nothing to show')\n return\n\n def show2D(vectors):\n colors = ['r', 'g', 'b', 'y']\n BASE = []\n x = []\n y = []\n\n for vector in vectors:\n BASE.append(0)\n x.append(vector[0])\n y.append(vector[1])\n\n plt.quiver(BASE, BASE, x, y, color=colors, angles='xy', scale_units='xy',\n scale=1)\n\n LIM = max(np.max(x), np.max(y))\n\n plt.xlim(-LIM, LIM)\n plt.ylim(-LIM, LIM)\n plt.grid(b=True, which='major', axis='both')\n plt.show()\n\n def show3D(vectors):\n U, V, W = zip(*vectors)\n\n X = np.zeros((1, len(V)))\n Y = np.zeros((1, len(V)))\n Z = np.zeros((1, len(V)))\n print('U', U)\n print('V', V)\n print('W', W)\n print('X', X)\n print('Y', Y)\n print('Z', Z)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.quiver(X, Y, Z, U, V, W, color=['r'])\n ax.set_xlim([-0.5, 0.5])\n ax.set_ylim([0, 1.0])\n ax.set_zlim([0, 1])\n plt.show()\n\n if representation == 'both' or representation == '2D':\n show2D(self.content['lights'])\n\n if representation == 'both' or representation == '3D':\n show3D(self.content['lights'])\n\n def compute(self):\n \"\"\"Computing light model from photos in project/Lights directory\"\"\"\n # TODO: use Template Pattern\n def compute_centroid(image, threshold=125):\n \"\"\"\n Computing centroid of given image.\n Please notice that this function can't check if given image is\n continuous blob, image must be preprocessed.\n\n :param image:\n :param threshold:\n :return: {x, y}\n \"\"\"\n xsum = 0\n ysum = 0\n count = 0\n for xy, value in np.ndenumerate(image):\n if value > threshold:\n xsum = xsum + xy[0]\n ysum = ysum + xy[1]\n count = count + 1\n return [xsum / count, ysum / count]\n\n def compute_radius(mask, threshold=125):\n area = 0\n for xy, value in np.ndenumerate(mask):\n if value > threshold:\n area = area + 1\n return math.sqrt(float(area) / math.pi)\n\n if os.listdir(self.project.directories['lights']) == [] or os.listdir(self.project.directories['lights']) == 
['lights.csv']:\n print('No images to process')\n return\n\n\n self._read_images()\n\n cx, cy = compute_centroid(self.content['mask'])\n r = compute_radius(self.content['mask'])\n\n light_vector = []\n for image in self.content['images']:\n px, py = compute_centroid(image)\n\n ny = -(px - cx)\n nx = py - cy\n nz = math.sqrt(r ** 2 - nx ** 2 - ny ** 2)\n\n N = np.array([nx / r, ny / r, nz / r])\n R = np.array([0, 0, 1])\n\n L = 2 * (np.dot(N, R) * N) - R\n\n light_vector.append(L)\n\n self.content['lights'] = light_vector\n\n def _read_images(self):\n \"\"\"\n Reading the mask and light images with cv2.imread() via ImageReader\n\n images are always provided as GRAYSCALE\n\n mask image as GRAYSCALE\n\n :param self:\n :return: None; fills self.content['mask'] and self.content['images']\n \"\"\"\n\n reader = ImageReader()\n\n _path = self.project.directories['lights']\n\n extension = '.png'\n images_dict = reader.read(_path, extension)\n images = []\n\n _mask_read = False\n for name in images_dict:\n name_list = name.split(\".\")\n\n if len(name_list) != 3:\n print(\"Image with invalid format: \", name)\n return\n\n if str(name_list[1]) == 'mask':\n if not _mask_read:\n self.content['mask'] = images_dict[name]\n _mask_read = True\n else:\n print(\"Only one mask allowed\")\n return\n else:\n try:\n number = int(name_list[1])\n except ValueError as e:\n print(name, ' Not an integer')\n return\n\n if 0 <= int(name_list[1]) <= 99:\n images.append(images_dict[name])\n\n self.content['images'] = images[:]\n self.state['data'] = True\n\n def _read_lights(self, path = None):\n # TODO: Abstraction e.g. LightReader\n '''\n Reading the table of lights from a .csv file with one light per row,\n formatted as three comma-separated floats (x, y, z).\n At least 3 lights are required.\n\n :param path: optional path to the .csv file; defaults to\n <project>/Lights/lights.csv\n '''\n if path is None:\n path = str(self.project.directories['lights']) + '/lights.csv'\n\n path = Path(path)\n\n if str(path)[-4:] != '.csv':\n print('Not a .csv file')\n return\n\n try:\n csv_file = open(path)\n except FileNotFoundError as e:\n print(\"File Not Found\")\n return\n\n lights = []\n\n with csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n count = 0\n for row in csv_reader:\n count += 1\n try:\n lights.append([float(row[0]), float(row[1]), float(row[2])])\n except ValueError as e:\n print('Data in file is not float. First invalid data in row:{}'.format(count))\n return\n except IndexError as e:\n print('Less than 3 cols in row:{}'.format(count))\n return\n\n\n if count >= 3:\n self.content['lights'] = lights\n self.state['data'] = True\n\n else:\n print('Minimum 3 lights required')\n return\n\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.ylim", "numpy.max", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.quiver", "numpy.ndenumerate", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
pavansainov15/MatchPrediction
[ "79a3782dae086586ac699947d66c75f7286ede1d" ]
[ "scrape_data.py" ]
[ "import pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\n\n# For player ranks\nurl = 'http://www.cricmetric.com/ipl/ranks/'\npd.read_html(requests.get(url).content)[-1].to_csv(\"./Dataset/_player_rank.csv\", index=False, header=None)\n\n# Store the sum of EF score by team.\ndata = pd.read_csv('./Dataset/_player_rank.csv')\nteam_rank = pd.DataFrame(\n data.groupby('Team')['EFscore'].agg(['sum']).reset_index().sort_values('sum', ascending=False)).to_csv(\n './Dataset/_team_rank.csv', index=False)\n\n# For news Headlines\nurl = 'https://sports.ndtv.com/indian-premier-league-2018/news'\npage_response = requests.get(url)\npage_content = BeautifulSoup(page_response.content, \"html.parser\")\npd.DataFrame(page_content.find_all(class_='menutitle')).to_csv(\"./Dataset/_news.txt\", index=False, header=None)\nclean_df = pd.read_csv('./Dataset/_news.txt', header=None).replace('<[^>]+>', '', regex=True)\npd.DataFrame(clean_df).to_csv(\"./Dataset/_news.txt\", index=False, header=None)\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
AroosaIjaz/Mypennylane
[ "40f2219b5e048d4bd93df815811ca5ed3f5327fa" ]
[ "pennylane/plugins/default_gaussian.py" ]
[ "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=inconsistent-return-statements\n\"\"\"\n\nDefault Gaussian plugin\n=======================\n\n**Module name:** :mod:`pennylane.plugins.default_gaussian`\n\n**Short name:** ``\"default.gaussian\"``\n\n.. currentmodule:: pennylane.plugins.default_gaussian\n\nThe :code:`default.gaussian` plugin is meant to be used as a template for writing PennyLane\ndevice plugins for new CV backends.\n\nIt implements the necessary :class:`~pennylane._device.Device` methods as well as all built-in\n:mod:`continuous-variable Gaussian operations <pennylane.ops.cv>` and\n:mod:`expectations <pennylane.expval.cv>`, and provides a very simple simulation of a\nGaussian-based quantum circuit architecture.\n\nThe following is the technical documentation of the implementation of the plugin. You will\nnot need to read and understand this to use this plugin.\n\nAuxillary functions\n-------------------\n\n.. autosummary::\n partitions\n fock_prob\n\nGates and operations\n--------------------\n\n.. autosummary::\n rotation\n displacement\n squeezing\n quadratic_phase\n beamsplitter\n two_mode_squeezing\n controlled_addition\n controlled_phase\n interferometer\n\nState preparation\n-----------------\n\n.. autosummary::\n squeezed_cov\n vacuum_state\n coherent_state\n squeezed_state\n displaced_squeezed_state\n thermal_state\n gaussian_state\n set_state\n\n\nExpectations\n------------\n\n.. autosummary::\n photon_number\n homodyne\n poly_quad_expectations\n fock_expectation\n\n\nClasses\n-------\n\n.. 
autosummary::\n DefaultGaussian\n\nCode details\n^^^^^^^^^^^^\n\"\"\"\n# pylint: disable=attribute-defined-outside-init\nimport numpy as np\n\nfrom scipy.special import factorial as fac\n\nimport pennylane as qml\nfrom pennylane import Device\n\n# tolerance for numerical errors\ntolerance = 1e-10\n\n\n#========================================================\n# auxillary functions\n#========================================================\n\ndef partitions(s, include_singles=True):\n \"\"\"Partitions a sequence into all groupings of pairs and singles of elements.\n\n Args:\n s (sequence): the sequence to partition\n include_singles (bool): if False, only partitions into pairs\n is returned.\n\n Returns:\n tuple: returns a nested tuple, containing all partitions of the sequence.\n \"\"\"\n # pylint: disable=too-many-branches\n if len(s) == 2:\n if include_singles:\n yield (s[0],), (s[1],)\n\n yield tuple(s),\n else:\n # pull off a single item and partition the rest\n if include_singles:\n if len(s) > 1:\n item_partition = (s[0],)\n rest = s[1:]\n rest_partitions = partitions(rest, include_singles)\n for p in rest_partitions:\n yield ((item_partition),) + p\n else:\n yield tuple(s),\n\n # pull off a pair of items and partition the rest\n for idx1 in range(1, len(s)):\n item_partition = (s[0], s[idx1])\n rest = s[1:idx1] + s[idx1+1:]\n rest_partitions = partitions(rest, include_singles)\n for p in rest_partitions:\n yield ((item_partition),) + p\n\n\ndef fock_prob(mu, cov, event, hbar=2.):\n r\"\"\"Returns the probability of detection of a particular PNR detection event.\n\n For more details, see:\n\n * Kruse, R., Hamilton, C. S., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.\n \"A detailed study of Gaussian Boson Sampling.\" `arXiv:1801.07488. (2018).\n <https://arxiv.org/abs/1801.07488>`_\n\n * Hamilton, C. S., Kruse, R., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.\n \"Gaussian boson sampling.\" `Physical review letters, 119(17), 170501. 
(2017).\n <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.119.170501>`_\n\n Args:\n mu (array): length-:math:`2N` means vector\n cov (array): :math:`2N\\times 2N` covariance matrix\n event (array): length-:math:`N` array of non-negative integers representing the\n PNR detection event of the multi-mode system.\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`.\n\n Returns:\n float: probability of detecting the event\n \"\"\"\n # number of modes\n N = len(mu)//2\n I = np.identity(N)\n\n # mean displacement of each mode\n alpha = (mu[:N] + 1j*mu[N:])/np.sqrt(2*hbar)\n # the expectation values (<a_1>, <a_2>,...,<a_N>, <a^\\dagger_1>, ..., <a^\\dagger_N>)\n beta = np.concatenate([alpha, alpha.conj()])\n\n x = cov[:N, :N]*2/hbar\n xp = cov[:N, N:]*2/hbar\n p = cov[N:, N:]*2/hbar\n # the (Hermitian) matrix elements <a_i^\\dagger a_j>\n aidaj = (x+p+1j*(xp-xp.T)-2*I)/4\n # the (symmetric) matrix elements <a_i a_j>\n aiaj = (x-p+1j*(xp+xp.T))/4\n\n # calculate the covariance matrix sigma_Q appearing in the Q function:\n # Q(alpha) = exp[-(alpha-beta).sigma_Q^{-1}.(alpha-beta)/2]/|sigma_Q|\n Q = np.block([[aidaj, aiaj.conj()], [aiaj, aidaj.conj()]]) + np.identity(2*N)\n\n # inverse Q matrix\n Qinv = np.linalg.inv(Q)\n # 1/sqrt(|Q|)\n sqrt_Qdet = 1/np.sqrt(np.linalg.det(Q).real)\n\n prefactor = np.exp(-beta @ Qinv @ beta.conj()/2)\n\n if np.all(np.array(event) == 0):\n # all PNRs detect the vacuum state\n return (prefactor*sqrt_Qdet).real/np.prod(fac(event))\n\n # the matrix X_n = [[0, I_n], [I_n, 0]]\n O = np.zeros_like(I)\n X = np.block([[O, I], [I, O]])\n\n gamma = X @ Qinv.conj() @ beta\n\n # For each mode, repeat the mode number event[i] times\n ind = [i for sublist in [[idx]*j for idx, j in enumerate(event)] for i in sublist]\n # extend the indices for xp-ordering of the Gaussian state\n ind += [i+N for i in ind]\n\n if np.linalg.norm(beta) < tolerance:\n # state has no displacement\n part = partitions(ind, include_singles=False)\n else:\n part = partitions(ind, include_singles=True)\n\n # calculate Hamilton's A matrix: A = X.(I-Q^{-1})*\n A = X @ (np.identity(2*N)-Qinv).conj()\n summation = np.sum([np.prod([gamma[i[0]] if len(i) == 1 else A[i] for i in p]) for p in part])\n\n return (prefactor*sqrt_Qdet*summation).real/np.prod(fac(event))\n\n\n#========================================================\n# parametrized gates\n#========================================================\n\ndef rotation(phi):\n \"\"\"Rotation in the phase space.\n\n Args:\n phi (float): rotation parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n return np.array([[np.cos(phi), -np.sin(phi)],\n [np.sin(phi), np.cos(phi)]])\n\n\ndef displacement(state, wire, alpha, hbar=2):\n \"\"\"Displacement in the phase space.\n\n Args:\n state (tuple): contains means vector and covariance matrix\n wire (int): wire that the displacement acts on\n alpha (float): complex displacement\n\n Returns:\n tuple: contains the vector of means and covariance matrix\n \"\"\"\n mu = state[0]\n mu[wire] += alpha.real*np.sqrt(2*hbar)\n mu[wire+len(mu)//2] += alpha.imag*np.sqrt(2*hbar)\n return mu, state[1]\n\n\ndef squeezing(r, phi):\n \"\"\"Squeezing in the phase space.\n\n Args:\n r (float): squeezing magnitude\n phi (float): rotation parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n cp = np.cos(phi)\n sp = np.sin(phi)\n ch = np.cosh(r)\n sh = np.sinh(r)\n return np.array([[ch-cp*sh, -sp*sh],\n [-sp*sh, ch+cp*sh]])\n\n\ndef 
quadratic_phase(s):\n \"\"\"Quadratic phase shift.\n\n Args:\n s (float): gate parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n return np.array([[1, 0],\n [s, 1]])\n\n\ndef beamsplitter(theta, phi):\n r\"\"\"Beamsplitter.\n\n Args:\n theta (float): transmittivity angle (:math:`t=\\cos\\theta`)\n phi (float): phase angle (:math:`r=e^{i\\phi}\\sin\\theta`)\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n cp = np.cos(phi)\n sp = np.sin(phi)\n ct = np.cos(theta)\n st = np.sin(theta)\n\n S = np.array([[ct, -cp*st, 0, -st*sp],\n [cp*st, ct, -st*sp, 0],\n [0, st*sp, ct, -cp*st],\n [st*sp, 0, cp*st, ct]])\n\n return S\n\n\ndef two_mode_squeezing(r, phi):\n\n \"\"\"Two-mode squeezing.\n\n Args:\n r (float): squeezing magnitude\n phi (float): rotation parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n cp = np.cos(phi)\n sp = np.sin(phi)\n ch = np.cosh(r)\n sh = np.sinh(r)\n\n S = np.array([[ch, cp*sh, 0, sp*sh],\n [cp*sh, ch, sp*sh, 0],\n [0, sp*sh, ch, -cp*sh],\n [sp*sh, 0, -cp*sh, ch]])\n\n return S\n\n\ndef controlled_addition(s):\n \"\"\"CX gate.\n\n Args:\n s (float): gate parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n S = np.array([[1, 0, 0, 0],\n [s, 1, 0, 0],\n [0, 0, 1, -s],\n [0, 0, 0, 1]])\n\n return S\n\n\ndef controlled_phase(s):\n \"\"\"CZ gate.\n\n Args:\n s (float): gate parameter\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n S = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, s, 1, 0],\n [s, 0, 0, 1]])\n\n return S\n\n\ndef interferometer(U):\n \"\"\"Interferometer\n\n Args:\n U (array): unitary matrix\n\n Returns:\n array: symplectic transformation matrix\n \"\"\"\n N = 2*len(U)\n X = U.real\n Y = U.imag\n rows = np.arange(N).reshape(2, -1).T.flatten()\n S = np.vstack([np.hstack([X, -Y]),\n np.hstack([Y, X])])[:, rows][rows]\n\n return S\n\n#========================================================\n# Arbitrary states and operators\n#========================================================\n\ndef squeezed_cov(r, phi, hbar=2):\n r\"\"\"Returns the squeezed covariance matrix of a squeezed state.\n\n Args:\n r (float): the squeezing magnitude\n p (float): the squeezing phase :math:`\\phi`\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n Returns:\n array: the squeezed state\n \"\"\"\n cov = np.array([[np.exp(-2*r), 0],\n [0, np.exp(2*r)]]) * hbar/2\n\n R = rotation(phi/2)\n\n return R @ cov @ R.T\n\n\ndef vacuum_state(wires, hbar=2.):\n r\"\"\"Returns the vacuum state.\n\n Args:\n basis (str): Returns the vector of means and the covariance matrix\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n Returns:\n array: the vacuum state\n \"\"\"\n means = np.zeros((2*wires))\n cov = np.identity(2*wires) * hbar/2\n state = [means, cov]\n return state\n\n\ndef coherent_state(a, phi=0, hbar=2.):\n r\"\"\"Returns a coherent state.\n\n Args:\n a (complex) : the displacement\n phi (float): the phase\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n Returns:\n array: the coherent state\n \"\"\"\n alpha = a*np.exp(1j*phi)\n means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)\n cov = np.identity(2) * hbar/2\n state = [means, cov]\n return state\n\n\ndef squeezed_state(r, phi, hbar=2.):\n r\"\"\"Returns a squeezed state.\n\n Args:\n r (float): the squeezing magnitude\n phi (float): the 
squeezing phase :math:`\\phi`\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n array: the squeezed state\n \"\"\"\n means = np.zeros((2))\n state = [means, squeezed_cov(r, phi, hbar)]\n return state\n\n\ndef displaced_squeezed_state(a, phi_a, r, phi_r, hbar=2.):\n r\"\"\"Returns a squeezed coherent state\n\n Args:\n a (real): the displacement magnitude\n phi_a (real): the displacement phase\n r (float): the squeezing magnitude\n phi_r (float): the squeezing phase :math:`\\phi_r`\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n array: the squeezed coherent state\n \"\"\"\n alpha = a * np.exp(1j*phi_a)\n means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)\n state = [means, squeezed_cov(r, phi_r, hbar)]\n return state\n\n\ndef thermal_state(nbar, hbar=2.):\n r\"\"\"Returns a thermal state.\n\n Args:\n nbar (float): the mean photon number\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n array: the thermal state\n \"\"\"\n means = np.zeros([2])\n state = [means, (2*nbar+1)*np.identity(2)*hbar/2]\n return state\n\n\ndef gaussian_state(mu, cov, hbar=2.):\n r\"\"\"Returns a Gaussian state.\n\n This is simply a bare wrapper function,\n since the means vector and covariance matrix\n can be passed via the parameters unchanged.\n\n Note that both the means vector and covariance\n matrix should be in :math:`(\\x_1,\\dots, \\x_N, \\p_1, \\dots, \\p_N)`\n ordering.\n\n Args:\n mu (array): vector means. Must be length-:math:`2N`,\n where N is the number of modes\n cov (array): covariance matrix. Must be dimension :math:`2N\\times 2N`,\n where N is the number of modes\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n array: the thermal state\n \"\"\"\n # pylint: disable=unused-argument\n return mu, cov\n\n\ndef set_state(state, wire, mu, cov):\n r\"\"\"Inserts a single mode Gaussian into the\n state representation of the complete system.\n\n Args:\n state (tuple): contains means vector\n and covariance matrix of existing state\n wire (int): wire corresponding to the new Gaussian state\n mu (array): vector of means to insert\n cov (array): covariance matrix to insert\n\n Returns:\n tuple: contains the vector of means and covariance matrix.\n \"\"\"\n mu0 = state[0]\n cov0 = state[1]\n N = len(mu0)//2\n\n # insert the new state into the means vector\n mu0[[wire, wire+N]] = mu\n\n # insert the new state into the covariance matrix\n ind = np.concatenate([np.array([wire]), np.array([wire])+N])\n rows = ind.reshape(-1, 1)\n cols = ind.reshape(1, -1)\n cov0[rows, cols] = cov\n\n return mu0, cov0\n\n\n#========================================================\n# expectations\n#========================================================\n\n\ndef photon_number(mu, cov, wires, params, hbar=2.):\n r\"\"\"Calculates the mean photon number for a given one-mode state.\n\n Args:\n mu (array): length-2 vector of means\n cov (array): :math:`2\\times 2` covariance matrix\n wires (Sequence[int]): wires to calculate the expectation for\n params (None): no parameters are used for this expectation value\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n tuple: contains the photon number expectation and variance\n \"\"\"\n # pylint: 
disable=unused-argument\n ex = (np.trace(cov) + mu.T @ mu)/(2*hbar) - 1/2\n var = (np.trace(cov @ cov) + 2*mu.T @ cov @ mu)/(2*hbar**2) - 1/4\n return ex, var\n\n\ndef homodyne(phi=None):\n \"\"\"Function factory that returns the Homodyne expectation of a one mode state.\n\n Args:\n phi (float): the default phase space axis to perform the Homodyne measurement\n\n Returns:\n function: A function that accepts a single mode means vector, covariance matrix,\n and phase space angle phi, and returns the quadrature expectation\n value and variance.\n \"\"\"\n if phi is not None:\n def _homodyne(mu, cov, wires, params, hbar=2.):\n \"\"\"Arbitrary angle homodyne expectation.\"\"\"\n # pylint: disable=unused-argument\n rot = rotation(phi)\n muphi = rot.T @ mu\n covphi = rot.T @ cov @ rot\n return muphi[0], covphi[0, 0]\n return _homodyne\n\n def _homodyne(mu, cov, wires, params, hbar=2.):\n \"\"\"Arbitrary angle homodyne expectation.\"\"\"\n # pylint: disable=unused-argument\n rot = rotation(params[0])\n muphi = rot.T @ mu\n covphi = rot.T @ cov @ rot\n return muphi[0], covphi[0, 0]\n return _homodyne\n\n\ndef poly_quad_expectations(mu, cov, wires, params, hbar=2.):\n r\"\"\"Calculates the expectation and variance for an arbitrary\n polynomial of quadrature operators.\n\n Args:\n mu (array): length-2 vector of means\n cov (array): :math:`2\\times 2` covariance matrix\n wires (Sequence[int]): wires to calculate the expectation for\n params (array): a :math:`(2N+1)\\times (2N+1)` array containing the linear\n and quadratic coefficients of the quadrature operators\n :math:`(\\I, \\x_0, \\p_0, \\x_1, \\p_1,\\dots)`\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n tuple: the mean and variance of the quadrature-polynomial observable\n \"\"\"\n Q = params[0]\n N = len(mu)//2\n\n # HACK, we need access to the Poly instance in order to expand the matrix!\n op = qml.expval.PolyXP(Q, wires=wires, do_queue=False)\n Q = op.heisenberg_obs(N)\n\n if Q.ndim == 1:\n d = np.r_[Q[1::2], Q[2::2]]\n return d.T @ mu + Q[0], d.T @ cov @ d\n\n # convert to the (I, x1,x2,..., p1,p2...) 
ordering\n M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))\n M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))\n d1 = M[1:, 0]\n d2 = M[0, 1:]\n\n A = M[1:, 1:]\n d = d1 + d2\n k = M[0, 0]\n\n d2 = 2*A @ mu + d\n k2 = mu.T @ A @ mu + mu.T @ d + k\n\n ex = np.trace(A @ cov) + k2\n var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2\n\n modes = np.arange(2*N).reshape(2, -1).T\n groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])\n var -= groenewald_correction\n\n return ex, var\n\n\ndef fock_expectation(mu, cov, wires, params, hbar=2.):\n r\"\"\"Calculates the expectation and variance of a Fock state probability.\n\n Args:\n mu (array): length-:math:`2N` vector of means\n cov (array): :math:`2N\\times 2N` covariance matrix\n wires (Sequence[int]): wires to calculate the expectation for\n params (Sequence[int]): the Fock state to return the expectation value for\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n\n Returns:\n tuple: the Fock state expectation and variance\n \"\"\"\n # pylint: disable=unused-argument\n ex = fock_prob(mu, cov, params[0], hbar=hbar)\n\n # var[|n><n|] = E[|n><n|^2] - E[|n><n|]^2 = E[|n><n|] - E[|n><n|]^2\n var = ex - ex**2\n return ex, var\n\n\ndef identity(*_, **__):\n r\"\"\"Returns 1.\n\n Returns:\n tuple: the Fock state expectation and variance\n \"\"\"\n return 1, 0\n\n\n#========================================================\n# device\n#========================================================\n\n\nclass DefaultGaussian(Device):\n r\"\"\"Default Gaussian device for PennyLane.\n\n Args:\n wires (int): the number of modes to initialize the device in\n shots (int): How many times should the circuit be evaluated (or sampled) to estimate\n the expectation values. 
0 yields the exact result.\n hbar (float): (default 2) the value of :math:`\\hbar` in the commutation\n relation :math:`[\\x,\\p]=i\\hbar`\n \"\"\"\n name = 'Default Gaussian PennyLane plugin'\n short_name = 'default.gaussian'\n pennylane_requires = '0.3'\n version = '0.3.0'\n author = 'Xanadu Inc.'\n\n _operation_map = {\n 'Beamsplitter': beamsplitter,\n 'ControlledAddition': controlled_addition,\n 'ControlledPhase': controlled_phase,\n 'Displacement': displacement,\n 'QuadraticPhase': quadratic_phase,\n 'Rotation': rotation,\n 'Squeezing': squeezing,\n 'TwoModeSqueezing': two_mode_squeezing,\n 'CoherentState': coherent_state,\n 'DisplacedSqueezedState': displaced_squeezed_state,\n 'SqueezedState': squeezed_state,\n 'ThermalState': thermal_state,\n 'GaussianState': gaussian_state,\n 'Interferometer': interferometer\n }\n\n _expectation_map = {\n 'MeanPhoton': photon_number,\n 'X': homodyne(0),\n 'P': homodyne(np.pi/2),\n 'Homodyne': homodyne(None),\n 'PolyXP': poly_quad_expectations,\n 'NumberState': fock_expectation,\n 'Identity': identity\n }\n\n _circuits = {}\n\n def __init__(self, wires, *, shots=0, hbar=2):\n super().__init__(wires, shots)\n self.eng = None\n self.hbar = hbar\n self.reset()\n\n def pre_apply(self):\n self.reset()\n\n def apply(self, operation, wires, par):\n if operation == 'Displacement':\n self._state = displacement(self._state, wires[0], par[0]*np.exp(1j*par[1]))\n return # we are done here\n\n if operation == 'GaussianState':\n if wires != list(range(self.num_wires)):\n raise ValueError(\"GaussianState means vector or covariance matrix is \"\n \"the incorrect size for the number of subsystems.\")\n self._state = self._operation_map[operation](*par, hbar=self.hbar)\n return # we are done here\n\n if 'State' in operation:\n # set the new device state\n mu, cov = self._operation_map[operation](*par, hbar=self.hbar)\n # state preparations only act on at most 1 subsystem\n self._state = set_state(self._state, wires[0], mu, cov)\n return # we are done here\n\n # get the symplectic matrix\n S = self._operation_map[operation](*par)\n\n # expand the symplectic to act on the proper subsystem\n S = self.expand(S, wires)\n\n # apply symplectic matrix to the means vector\n means = S @ self._state[0]\n # apply symplectic matrix to the covariance matrix\n cov = S @ self._state[1] @ S.T\n\n self._state = [means, cov]\n\n def expand(self, S, wires):\n r\"\"\"Expands a Symplectic matrix S to act on the entire subsystem.\n\n Args:\n S (array): a :math:`2M\\times 2M` Symplectic matrix\n wires (Sequence[int]): the wires of the modes that S acts on\n\n Returns:\n array: the resulting :math:`2N\\times 2N` Symplectic matrix\n \"\"\"\n if self.num_wires == 1:\n # total number of wires is 1, simply return the matrix\n return S\n\n N = self.num_wires\n w = np.asarray(wires)\n\n if np.any(w < 0) or np.any(w >= N) or len(set(w)) != len(w):\n raise ValueError(\"Invalid target subsystems provided in 'wires' argument.\")\n\n M = len(S) // 2\n S2 = np.identity(2 * N)\n\n if M != len(wires):\n raise ValueError('Incorrect number of subsystems for provided operation.')\n\n S2[w.reshape(-1, 1), w.reshape(1, -1)] = S[:M, :M].copy() # XX\n S2[(w + N).reshape(-1, 1), (w + N).reshape(1, -1)] = S[M:, M:].copy() # PP\n S2[w.reshape(-1, 1), (w + N).reshape(1, -1)] = S[:M, M:].copy() # XP\n S2[(w + N).reshape(-1, 1), w.reshape(1, -1)] = S[M:, :M].copy() # PX\n\n return S2\n\n def expval(self, expectation, wires, par):\n mu, cov = self.reduced_state(wires)\n\n ev, var = 
self._expectation_map[expectation](mu, cov, wires, par, hbar=self.hbar)\n\n if self.shots != 0:\n # estimate the ev\n # use central limit theorem, sample normal distribution once, only ok if n_eval is large\n # (see https://en.wikipedia.org/wiki/Berry%E2%80%93Esseen_theorem)\n ev = np.random.normal(ev, np.sqrt(var / self.shots))\n\n return ev\n\n def reset(self):\n \"\"\"Reset the device\"\"\"\n # init the state vector to |00..0>\n self._state = vacuum_state(self.num_wires, self.hbar)\n\n def reduced_state(self, wires):\n r\"\"\" Returns the vector of means and the covariance matrix of the specified wires.\n\n Args:\n wires (int of Sequence[int]): indices of the requested wires\n\n Returns:\n tuple (means, cov): means is an array containing the vector of means,\n and cov is a square array containing the covariance matrix\n \"\"\"\n if wires == list(range(self.num_wires)):\n # reduced state is full state\n return self._state\n\n # reduce rho down to specified subsystems\n if isinstance(wires, int):\n wires = [wires]\n\n if np.any(np.array(wires) > self.num_wires):\n raise ValueError(\"The specified wires cannot \"\n \"be larger than the number of subsystems.\")\n\n ind = np.concatenate([np.array(wires), np.array(wires)+self.num_wires])\n rows = ind.reshape(-1, 1)\n cols = ind.reshape(1, -1)\n\n return self._state[0][ind], self._state[1][rows, cols]\n\n @property\n def operations(self):\n return set(self._operation_map.keys())\n\n @property\n def expectations(self):\n return set(self._expectation_map.keys())\n" ]
[ [ "numpy.sqrt", "numpy.asarray", "numpy.zeros_like", "numpy.any", "numpy.exp", "numpy.trace", "numpy.hstack", "numpy.arange", "numpy.sin", "numpy.linalg.det", "numpy.block", "scipy.special.factorial", "numpy.zeros", "numpy.cosh", "numpy.linalg.inv", "numpy.identity", "numpy.array", "numpy.cos", "numpy.sinh", "numpy.linalg.norm", "numpy.vstack" ] ]
LiangbeiXu/edm2016
[ "728e3605d1af5113ed75883f11fea2f1271fd427" ]
[ "rnn_prof/run_irt.py" ]
[ "\"\"\"\nScript for running basic online IRT\n\"\"\"\nimport logging\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse as sp\n\nfrom .data.constants import (ITEM_IDX_KEY, TEMPLATE_IDX_KEY, USER_IDX_KEY, CORRECT_KEY,\n CONCEPT_IDX_KEY)\nfrom .data.wrapper import DEFAULT_DATA_OPTS\nfrom .irt import TEST_RESPONSES_KEY, OFFSET_COEFFS_KEY\nfrom .irt.callbacks import ConvergenceCallback\nfrom .irt.learners import OnePOLearner, TwoPOLearner, OnePOHighRT, HIGHER_OFFSET_KEY\nfrom .irt.metrics import Metrics\nfrom .irt.online_cross_validation import get_online_rps\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_metrics(correct, rps):\n \"\"\" Compute global PC, MAP Accuracy, AUC validation metrics.\n :param np.ndarray[bool] correct: correctnesses\n :param np.ndarray[float] rps: probability of correct\n :return: global percent correct, MAP accuracy, AUC\n :rtype: dict\n \"\"\"\n correct_hats = rps >= 0.5\n global_acc = np.mean(np.array(correct, dtype=float))\n map_acc = np.mean(np.array(correct_hats == correct, dtype=float))\n auc = Metrics.auc_helper(correct, rps)\n return {'global': global_acc, 'map': map_acc, 'auc': auc}\n\n\ndef compute_theta_idx(train_df, test_df=None, single_concept=True):\n \"\"\"\n Compute theta indices. If single_concept is True, then there is one theta\n per user, and if it is false, there is one theta per user/concept pair.\n\n Training and testing users are assumed disjoint and consecutive.\n\n :param pd.DataFrame train_df: The DataFrame of training data. Should have\n columns labeled `USER_IDX_KEY` and `CONCEPT_IDX_KEY`.\n :param pd.DataFrame|None test_df: The DataFrame of testing data. Should have\n columns labeled `USER_IDX_KEY` and `CONCEPT_IDX_KEY`. Can be None and if\n so is simply ignored.\n :param bool single_concept: Should there be one theta per user (True) or\n one theta per user/concept pair (False)\n :return: Theta indices whose order corresponds to the order of the passed\n data. Training comes before testing.\n :rtype: np.ndarray\n \"\"\"\n if single_concept:\n if test_df is None:\n return train_df[USER_IDX_KEY].values\n else:\n return np.concatenate([train_df[USER_IDX_KEY].values, test_df[USER_IDX_KEY].values])\n else:\n num_users = train_df[USER_IDX_KEY].max() + 1\n if test_df is None:\n train_idx = train_df[USER_IDX_KEY].values + train_df[CONCEPT_IDX_KEY].values * num_users\n return train_idx\n\n num_users = max(num_users, test_df[USER_IDX_KEY].max() + 1)\n train_idx = train_df[USER_IDX_KEY].values + train_df[CONCEPT_IDX_KEY].values * num_users\n test_idx = test_df[USER_IDX_KEY].values + test_df[CONCEPT_IDX_KEY].values * num_users\n return np.concatenate([train_idx, test_idx])\n\n\ndef get_irt_learner(train_df, test_df=None, is_two_po=True,\n single_concept=True, template_precision=None, item_precision=None):\n \"\"\" Make a 1PO or 2PO learner.\n\n :param pd.DataFrame train_df: Train data\n :param pd.DataFrame test_df: Optional test data\n :param bool is_two_po: Whether to make a 2PO learner\n :param bool single_concept: Should we train with a single theta per user (True)\n or a single theta per user per concept (False)\n :param float template_precision: The hierarchical IRT model has a model\n item_difficulty ~ N(template_difficulty, 1.0/item_precision) and\n template_difficulty ~ N(0, 1.0/template_precision). None just ignores\n templates.\n :param float|None item_precision: The precision of the Gaussian prior around items in a\n non-templated model. Or see `template_precision` for the templated case. 
If None, uses 1.0.\n :return: The learner\n :rtype: BayesNetLearner\n \"\"\"\n correct = train_df[CORRECT_KEY].values.astype(bool)\n item_idx = train_df[ITEM_IDX_KEY].values\n is_held_out = np.zeros(len(train_df), dtype=bool)\n if test_df is not None:\n correct = np.concatenate((correct, test_df[CORRECT_KEY].values.astype(bool)))\n item_idx = np.concatenate((item_idx, test_df[ITEM_IDX_KEY].values))\n is_held_out = np.concatenate((is_held_out, np.ones(len(test_df), dtype=bool)))\n\n student_idx = compute_theta_idx(train_df, test_df=test_df, single_concept=single_concept)\n if not template_precision:\n learner_class = TwoPOLearner if is_two_po else OnePOLearner\n learner = learner_class(correct, student_idx=student_idx, item_idx=item_idx,\n is_held_out=is_held_out, max_iterations=1000,\n callback=ConvergenceCallback())\n for node in learner.nodes.values():\n node.solver_pars.updater.step_size = 0.5\n if item_precision is not None:\n learner.nodes[OFFSET_COEFFS_KEY].cpd.precision = \\\n item_precision * sp.eye(learner.nodes[OFFSET_COEFFS_KEY].data.size)\n LOGGER.info(\"Made a %s IRT learner with item precision %f\",\n '2PO' if is_two_po else '1PO', item_precision)\n else:\n LOGGER.info(\"Made a %s IRT learner with default item precision\",\n '2PO' if is_two_po else '1PO')\n else:\n template_idx = train_df[TEMPLATE_IDX_KEY]\n if test_df is not None:\n template_idx = np.concatenate((template_idx, test_df[TEMPLATE_IDX_KEY].values))\n problem_to_template = {item: template for item, template in zip(item_idx, template_idx)}\n problem_to_template = sorted(problem_to_template.items())\n template_idx = np.array([x for _, x in problem_to_template])\n learner = OnePOHighRT(correct, student_idx, item_idx, template_idx,\n is_held_out=is_held_out, max_iterations=1000,\n higher_precision=item_precision,\n callback=ConvergenceCallback())\n if item_precision is not None:\n learner.nodes[HIGHER_OFFSET_KEY].cpd.precision = \\\n template_precision * sp.eye(learner.nodes[HIGHER_OFFSET_KEY].data.size)\n for node in learner.nodes.values():\n node.solver_pars.updater.step_size = 0.5\n LOGGER.info(\"Made a hierarchical IRT learner with item precision %f and template \"\n \"precision %f\", item_precision, template_precision)\n return learner\n\n\ndef irt(data_folds, num_folds, output=None, data_opts=DEFAULT_DATA_OPTS, is_two_po=True,\n single_concept=True, template_precision=None, which_fold=None,\n item_precision=None):\n \"\"\" Run 1PO/2PO IRT and print test-set metrics.\n\n :param iterable data_folds: An iterator over (train, test) data tuples\n :param int num_folds: number of folds\n :param str output: where to store the pickled output of the results\n :param DataOpts data_opts: data pre-processing parameters, to be saved (in the future) with IRT\n outputs. See `data.wrapper` for details and default values.\n :param bool is_two_po: Whether to use the 2PO IRT model\n :param bool single_concept: Should we train with a single theta per user (True)\n or one theta per user/concept pair (False)\n :param float template_precision: the precision of the higher-order template variable\n specifying the mean of the item difficulties\n :param int | None which_fold: Specify which of the folds you want to actually process. If None,\n process all folds. Good for naive parallelization.\n :param float|None item_precision: The precision of the Gaussian prior around items in a\n non-templated model. 
If None, uses 1.0.\n \"\"\"\n if which_fold is not None and not (1 <= which_fold <= num_folds):\n raise ValueError(\"which_fold ({which_fold}) must be between 1 \"\n \"and num_folds({num_folds})\".format(which_fold=which_fold,\n num_folds=num_folds))\n\n np.random.seed(data_opts.seed)\n metrics = pd.DataFrame()\n for fold_num, (train_data, test_data) in enumerate(data_folds):\n fold_num += 1\n if which_fold and fold_num != which_fold:\n continue\n fold_metrics, _, _ = eval_learner(train_data, test_data, is_two_po, fold_num,\n single_concept=single_concept,\n template_precision=template_precision,\n item_precision=item_precision)\n metrics = metrics.append(pd.DataFrame(index=[len(metrics)], data=fold_metrics))\n\n if output:\n metrics.to_pickle(output)\n\n # Print overall results\n LOGGER.info(\"Overall Acc: %.5f AUC: %.5f\", metrics['map'].mean(), metrics['auc'].mean())\n\n\ndef eval_learner(train_data, test_data, is_two_po, fold_num,\n single_concept=True, template_precision=None, item_precision=None):\n \"\"\" Create, train, and cross-validate an IRT learner on a train/test split.\n\n :param pd.DataFrame train_data: training data\n :param pd.DataFrame test_data: testing data for cross-validation (required)\n :param bool is_two_po: Whether to use the 2PO IRT model\n :param int fold_num: fold number (for logging and recording results only)\n :param float template_precision: The hierarchical IRT model has a model\n item_difficulty ~ N(template_difficulty, 1.0/template_precision). None just ignores\n templates.\n :param bool single_concept: Should we train with a single concept per user (True)\n or a single concept per user per concept (False)\n :param float|None item_precision: The precision of the Gaussian prior around items in a\n non-templated model. If None, uses 1.0.\n :return: the validation metrics, predicted RP's, and boolean corrects on the test set\n :rtype: dict, np.ndarray[float], np.ndarray[bool]\n \"\"\"\n LOGGER.info(\"Training %s model, fold %d, (single concept = %s)\",\n '2PO' if is_two_po else '1PO', fold_num, single_concept)\n learner = get_irt_learner(train_data, test_data, is_two_po=is_two_po,\n single_concept=single_concept,\n template_precision=template_precision,\n item_precision=item_precision)\n learner.learn()\n LOGGER.info(\"Performing online cross-validation\")\n prob_correct = get_online_rps(learner, test_data[USER_IDX_KEY].values,\n compute_first_interaction_rps=True)\n\n test_correct = learner.nodes[TEST_RESPONSES_KEY].data\n metrics = get_metrics(test_correct, prob_correct)\n metrics['is_two_po'] = is_two_po\n metrics['fold_num'] = fold_num\n metrics['num_test_interactions'] = len(test_correct)\n LOGGER.info(\"Fold %d: Num Interactions: %d; Test Accuracy: %.5f; Test AUC: %.5f\",\n fold_num, metrics['num_test_interactions'], metrics['map'], metrics['auc'])\n return metrics, prob_correct, test_correct\n" ]
[ [ "numpy.random.seed", "scipy.sparse.eye", "pandas.DataFrame", "numpy.concatenate", "numpy.array" ] ]
epfml/relaysgd
[ "536f809f2a5fed5f5004b3f49857d67462ac89d2", "536f809f2a5fed5f5004b3f49857d67462ac89d2" ]
[ "tasks/deit/models.py", "tasks/utils/non_iid_dirichlet.py" ]
[ "# Copyright (c) 2015-present, Facebook, Inc.\n# All rights reserved.\n# https://github.com/facebookresearch/deit/blob/cb29b5efd522a0ac83d64aa8b41fe27cead3a030/models.py\nimport torch\nimport torch.nn as nn\nfrom functools import partial\n\nfrom timm.models.vision_transformer import VisionTransformer, _cfg\nfrom timm.models.registry import register_model\nfrom timm.models.layers import trunc_normal_\n\n\n__all__ = [\n 'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',\n 'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',\n 'deit_base_distilled_patch16_224', 'deit_base_patch16_384',\n 'deit_base_distilled_patch16_384',\n]\n\n\nclass DistilledVisionTransformer(VisionTransformer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n num_patches = self.patch_embed.num_patches\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.dist_token, std=.02)\n trunc_normal_(self.pos_embed, std=.02)\n self.head_dist.apply(self._init_weights)\n\n def forward_features(self, x):\n # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n # with slight modifications to add the dist_token\n B = x.shape[0]\n x = self.patch_embed(x)\n\n cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n dist_token = self.dist_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, dist_token, x), dim=1)\n\n x = x + self.pos_embed\n x = self.pos_drop(x)\n\n for blk in self.blocks:\n x = blk(x)\n\n x = self.norm(x)\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x, x_dist = self.forward_features(x)\n x = self.head(x)\n x_dist = self.head_dist(x_dist)\n if self.training:\n return x, x_dist\n else:\n # during inference, return the average of both classifier predictions\n return (x + x_dist) / 2\n\n\n@register_model\ndef deit_tiny_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_small_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n 
url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_small_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_patch16_384(pretrained=False, **kwargs):\n model = VisionTransformer(\n img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_distilled_patch16_384(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n", "#%%\nimport math\nimport numpy as np\n\n#%%\ndef distribute_data_dirichlet(\n targets, non_iid_alpha, n_workers, seed=0, num_auxiliary_workers=10\n):\n \"\"\"Code adapted from Tao Lin (partition_data.py)\"\"\"\n random_state = np.random.RandomState(seed=seed)\n\n num_indices = len(targets)\n num_classes = len(np.unique(targets))\n\n indices2targets = np.array(list(enumerate(targets)))\n random_state.shuffle(indices2targets)\n\n # partition indices.\n from_index = 0\n splitted_targets = []\n num_splits = math.ceil(n_workers / num_auxiliary_workers)\n 
split_n_workers = [\n num_auxiliary_workers\n if idx < num_splits - 1\n else n_workers - num_auxiliary_workers * (num_splits - 1)\n for idx in range(num_splits)\n ]\n split_ratios = [_n_workers / n_workers for _n_workers in split_n_workers]\n for idx, ratio in enumerate(split_ratios):\n to_index = from_index + int(num_auxiliary_workers / n_workers * num_indices)\n splitted_targets.append(\n indices2targets[\n from_index : (num_indices if idx == num_splits - 1 else to_index)\n ]\n )\n from_index = to_index\n\n idx_batch = []\n for _targets in splitted_targets:\n # rebuild _targets.\n _targets = np.array(_targets)\n _targets_size = len(_targets)\n\n # use auxi_workers for this subset targets.\n _n_workers = min(num_auxiliary_workers, n_workers)\n n_workers = n_workers - num_auxiliary_workers\n\n # get the corresponding idx_batch.\n min_size = 0\n while min_size < int(0.50 * _targets_size / _n_workers):\n _idx_batch = [[] for _ in range(_n_workers)]\n for _class in range(num_classes):\n # get the corresponding indices in the original 'targets' list.\n idx_class = np.where(_targets[:, 1] == _class)[0]\n idx_class = _targets[idx_class, 0]\n\n # sampling.\n try:\n proportions = random_state.dirichlet(\n np.repeat(non_iid_alpha, _n_workers)\n )\n # balance\n proportions = np.array(\n [\n p * (len(idx_j) < _targets_size / _n_workers)\n for p, idx_j in zip(proportions, _idx_batch)\n ]\n )\n proportions = proportions / proportions.sum()\n proportions = (np.cumsum(proportions) * len(idx_class)).astype(int)[\n :-1\n ]\n _idx_batch = [\n idx_j + idx.tolist()\n for idx_j, idx in zip(\n _idx_batch, np.split(idx_class, proportions)\n )\n ]\n sizes = [len(idx_j) for idx_j in _idx_batch]\n min_size = min([_size for _size in sizes])\n except ZeroDivisionError:\n pass\n idx_batch += _idx_batch\n return idx_batch\n" ]
[ [ "torch.zeros", "torch.cat", "torch.nn.Linear", "torch.nn.Identity", "torch.hub.load_state_dict_from_url" ], [ "numpy.split", "numpy.unique", "numpy.cumsum", "numpy.repeat", "numpy.array", "numpy.where", "numpy.random.RandomState" ] ]
juliendehos/nevergrad
[ "b31a66bdc883e29a6c8572e341b4b56cc4157a9d" ]
[ "nevergrad/benchmark/test_plotting.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom . import plotting # pylint: disable=wrong-import-position, wrong-import-order\nfrom unittest.mock import patch\nfrom pathlib import Path\nimport typing as tp\nimport pytest\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nfrom nevergrad.common import testing\nfrom . import utils\n\nmatplotlib.use(\"Agg\")\n\n\ndef test_get_winners_df() -> None:\n data = [[\"alg0\", 46424, 0.4], [\"alg1\", 4324546, 0.1], [\"alg1\", 424546, 0.5], [\"alg2\", 1424546, 0.3]]\n df = pd.DataFrame(columns=[\"optimizer_name\", \"blublu\", \"loss\"], data=data)\n all_optimizers = [f\"alg{k}\" for k in range(4)]\n # blublu column is useless, and losses are meaned for each algorithm\n winners = plotting._make_winners_df(df, all_optimizers)\n data = 0.5 * np.identity(4) # self playing is a tie\n # unspecified algo\n data[(3, 3)] = 0 # type: ignore\n # alg1 and alg2 win over alg0 # type: ignore\n data[tuple(zip(*[(1, 0), (2, 0)]))] = 1 # type: ignore\n # alg1 and alg2 are a tie (mean loss .3)\n data[tuple(zip(*[(1, 2), (2, 1)]))] = 0.5 # type: ignore\n expected = pd.DataFrame(index=all_optimizers, columns=all_optimizers, data=data)\n winners.assert_equivalent(expected)\n\n\ndef test_make_sorted_winrates() -> None:\n algos = [f\"alg{k}\" for k in range(4)]\n data = [\n [0, 0, 0, 0], # unplayed\n [0, 2, 4, 4], # all time winner (4 games\n [0, 0, 2, 1], # last\n [0, 0, 3, 2],\n ]\n victories = pd.DataFrame(index=algos, columns=algos, data=data)\n winrates = plotting._make_sorted_winrates_df(victories)\n expected_data = [[0.5, 1, 1, -1.0], [0, 0.5, 0.75, -1], [0, 0.25, 0.5, -1], [0, 0, 0, -1]]\n winrates = winrates.fillna(-1)\n salgos = [f\"alg{k}\" for k in [1, 3, 2, 0]]\n expected = pd.DataFrame(index=salgos, columns=salgos, data=expected_data)\n assert winrates.equals(expected), f\"Expected:\\n{expected}\\nbut got:\\n{winrates}\"\n\n\ndef test_create_plots_from_csv_mocked() -> None:\n df = pd.read_csv(Path(__file__).parent / \"sphere_perf_example.csv\")\n with patch(\"nevergrad.benchmark.plotting.XpPlotter\"):\n with patch(\"nevergrad.benchmark.plotting.FightPlotter\") as fplt:\n plotting.create_plots(df, \"\", max_combsize=1)\n assert fplt.call_count == 6, \"Should be called for all, 2 noise levels and 3 budgets\"\n\n\ndef test_fight_plotter() -> None:\n df = utils.Selector.read_csv(Path(__file__).parent / \"sphere_perf_example.csv\").select(\n optimizer_name=[\"OnePlusOneOptimizer\", \"HaltonSearch\", \"Powell\"]\n )\n winrates = plotting.FightPlotter.winrates_from_selection(df, [\"noise_level\", \"budget\"])\n # check data\n np.testing.assert_array_equal(\n winrates.index, [\"Powell (75.0%)\", \"OnePlusOneOptimizer (58.3%)\", \"Halton (16.7%)\"]\n )\n np.testing.assert_array_equal(\n winrates.columns, [\"Powell (6/6)\", \"OnePlusOneOptimizer (6/6)\", \"HaltonSearch (6/6)\"]\n )\n np.testing.assert_almost_equal(winrates, [[0.5, 0.75, 1], [0.25, 0.5, 1], [0, 0, 0.5]])\n # plot\n plotter = plotting.FightPlotter(winrates)\n with patch(\"matplotlib.pyplot.Figure.savefig\"):\n plotter.save(\"should_not_exist.png\")\n\n\ndef test_xp_plotter() -> None:\n opt = \"OnePlusOneOptimizer\"\n df = utils.Selector.read_csv(Path(__file__).parent / \"sphere_perf_example.csv\").select(\n optimizer_name=[opt]\n )\n unused_data = plotting.XpPlotter.make_data(df, normalized_loss=True)\n data = 
plotting.XpPlotter.make_data(df)\n # check data\n testing.assert_set_equal(data.keys(), {opt})\n testing.assert_set_equal(data[opt].keys(), {\"budget\", \"loss\", \"loss_std\", \"num_eval\"})\n np.testing.assert_almost_equal(data[opt][\"budget\"], [200, 400, 800])\n np.testing.assert_almost_equal(data[opt][\"loss\"], [0.4811605, 0.3920045, 0.14778369])\n np.testing.assert_almost_equal(data[opt][\"loss_std\"], [0.83034832, 0.73255529, 0.18551625])\n # plot\n with patch(\"matplotlib.pyplot.Figure.tight_layout\"): # avoid warning message\n plotter = plotting.XpPlotter(data, title=\"Title\")\n with patch(\"matplotlib.pyplot.Figure.savefig\"):\n plotter.save(\"should_not_exist.png\")\n\n\ndef test_remove_errors() -> None:\n data = [\n [\"alg0\", 0, 10, np.nan],\n [\"alg2\", np.nan, 30, \"ValueError\"],\n [\"alg1\", 0, 20, \"SomeHandledError\"],\n [\"alg3\", np.nan, 40, \"BlubluError\"],\n ]\n df = pd.DataFrame(columns=[\"optimizer_name\", \"loss\", \"dimension\", \"error\"], data=data)\n with pytest.warns(UserWarning) as w:\n output = plotting.remove_errors(df)\n assert len(w) == 3\n expected = pd.DataFrame(\n columns=[\"optimizer_name\", \"loss\", \"dimension\"], data=[[\"alg0\", 0, 10], [\"alg1\", 0, 20]]\n )\n np.testing.assert_array_equal(output.columns, expected.columns)\n np.testing.assert_array_equal(output.index, expected.index)\n np.testing.assert_array_equal(output, expected)\n assert isinstance(output, plotting.utils.Selector)\n\n\ndef test_remove_nan_value() -> None:\n data = [[\"alg0\", 0, 10, np.nan], [\"alg2\", np.nan, 30, np.nan]]\n df = pd.DataFrame(columns=[\"optimizer_name\", \"loss\", \"dimension\", \"error\"], data=data)\n with pytest.warns(UserWarning) as w:\n output = plotting.remove_errors(df)\n assert len(w) == 1\n expected = pd.DataFrame(columns=[\"optimizer_name\", \"loss\", \"dimension\"], data=[[\"alg0\", 0, 10]])\n np.testing.assert_array_equal(output, expected)\n\n\ndef test_make_style_generator() -> None:\n num = 364\n gen = plotting._make_style_generator()\n output = [next(gen) for _ in range(num)]\n np.testing.assert_equal(output[:5], [\"-ob\", \"--vg\", \":^r\", \"-.<c\", \"->m\"])\n # the following is only out of curiosity\n np.testing.assert_equal(len(set(output)), num) # no repetition\n repeating = next(gen)\n np.testing.assert_equal(repeating, output[0])\n\n\ndef test_name_style() -> None:\n nstyle = plotting.NameStyle()\n np.testing.assert_equal(nstyle[\"blublu\"], \"-ob\")\n np.testing.assert_equal(nstyle[\"plop\"], \"--vg\")\n np.testing.assert_equal(nstyle[\"blublu\"], \"-ob\")\n\n\ndef test_split_long_title() -> None:\n title = \"abcd,efgh\"\n np.testing.assert_equal(plotting.split_long_title(title), title)\n title = \",\".join([\"a\" * 25, \"b\" * 25, \"c\" * 25, \"d\" * 15])\n np.testing.assert_equal(plotting.split_long_title(title), title[:52] + \"\\n\" + title[52:])\n title = \"a\" * 70\n np.testing.assert_equal(plotting.split_long_title(title), title)\n\n\[email protected](\n nothing=([1, 2, 10.0], [1, 2, 10.0]),\n identic=([1, 1, 10.0, 10.0], [0.5, 1.5, 9.5, 10.5]),\n)\ndef test_compute_best_placements(positions: tp.List[float], expected: tp.List[float]) -> None:\n new_positions = plotting.compute_best_placements(positions, min_diff=1.0)\n np.testing.assert_array_equal(new_positions, expected)\n\n\ndef test_merge_parametrization_and_optimizer() -> None:\n df = pd.DataFrame(\n columns=[\"optimizer_name\", \"parametrization\", \"val\"],\n data=[[\"o1\", \"p1\", 1], [\"o1\", \"p2\", 2], [\"o2\", np.nan, 3]],\n )\n out = 
plotting.merge_optimizer_name_pattern(utils.Selector(df), \"{optimizer_name},{parametrization}\")\n assert isinstance(out, utils.Selector)\n assert out[\"optimizer_name\"].tolist() == [\"o1,p1\", \"o1,p2\", \"o2\"]\n assert out[\"val\"].tolist() == [1, 2, 3]\n # nothing to do\n out = plotting.merge_optimizer_name_pattern(utils.Selector(df), \"\")\n assert out[\"optimizer_name\"].tolist() == [\"o1\", \"o1\", \"o2\"]\n\n\ndef test_normalized_losses() -> None:\n data = [\n [\"alg0\", 0, \"sphere\", 3],\n [\"alg0\", -2, \"sphere\", 3],\n [\"alg2\", 2, \"sphere\", 3],\n [\"alg3\", 12, \"sphere\", 12],\n [\"alg2\", 5, \"sphere\", 12],\n [\"alg4\", 24, \"ellipsoid\", 3],\n [\"alg5\", float(\"inf\"), \"sphere\", 3],\n ]\n df = pd.DataFrame(columns=[\"optimizer_name\", \"loss\", \"func\", \"dimension\"], data=data)\n ndf = plotting.normalized_losses(df, [\"func\", \"dimension\"])\n np.testing.assert_array_equal(ndf.loss, [0.5, 0, 1, 1, 0, 1, float(\"inf\")])\n\n\nif __name__ == \"__main__\":\n # simple example which can be run with:\n # python -m nevergrad.benchmark.test_plotting\n df_test = pd.read_csv(Path(__file__).parent / \"sphere_perf_example.csv\")\n plotting.create_plots(df_test, output_folder=\"\", max_combsize=0)\n" ]
[ [ "numpy.testing.assert_equal", "matplotlib.use", "pandas.DataFrame", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.identity" ] ]
HDI-Project/DataTracer
[ "4bb0906f1716bbcfeb0881cade5d6d47bca90764" ]
[ "datatracer/column_map/basic.py" ]
[ "import logging\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom datatracer.column_map.base import ColumnMapSolver\nfrom datatracer.column_map.transformer import Transformer\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass BasicColumnMapSolver(ColumnMapSolver):\n \"\"\"Basic Solver for the data lineage problem of column dependency.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._model_args = args\n self._model_kwargs = kwargs\n\n def _get_importances(self, X, y):\n model = RandomForestRegressor(*self._model_args, **self._model_kwargs)\n model.fit(X, y)\n\n return model.feature_importances_\n\n def solve(self, tables, foreign_keys, target_table, target_field):\n \"\"\"Find the fields which contributed to the target_field the most.\n\n The output is a dictionary containing the fields that contributed the\n most to the given target field as keys, specified as a tuple containing\n both table name and field name, and the score obtained as values.\n\n Args:\n tables (dict):\n Dict containing table names as input and ``pandas.DataFrames``\n as values.\n foreign_keys (list):\n List of foreign key specifications.\n target_table (str):\n Name of the table that contains the target field.\n target_field (str):\n Name of the target field.\n\n Returns:\n dict:\n Dictionary of field specification tuples and scores.\n \"\"\"\n transformer = Transformer(tables, foreign_keys)\n\n X, y = transformer.forward(target_table, target_field)\n\n importances = self._get_importances(X, y)\n return transformer.backward(importances)\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor" ] ]
Tongzhenguo/bpr-spark
[ "5ee01ba09b2dc7247052296b6097eb63d52f0dca" ]
[ "bpr.py" ]
[ "import random\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef _gradient_single_point(user_id, prod_id, prod_id_neg,\n user_mat, prod_mat, lambda_reg, alpha):\n\n x_uij = user_mat[user_id].dot(prod_mat[prod_id]) - \\\n user_mat[user_id].dot(prod_mat[prod_id_neg])\n\n step_size = np.exp(-x_uij) / (1 + np.exp(-x_uij)) * alpha\n\n user_mat[user_id] += step_size * \\\n (prod_mat[prod_id] - prod_mat[prod_id_neg]) + \\\n lambda_reg * user_mat[user_id]\n\n prod_mat[prod_id] += step_size * \\\n user_mat[user_id] + lambda_reg * prod_mat[prod_id]\n \n prod_mat[prod_id_neg] -= step_size * \\\n user_mat[user_id] + lambda_reg * prod_mat[prod_id_neg]\n\n\ndef _sample_optimize_partition(ratings, user_mat, prod_mat, num_prods,\n lambda_reg=0.001, alpha=0.1, position=None):\n\n sampled_ratings = random.sample(list(ratings), 20000)\n\n for u, i, j in tqdm(sampled_ratings, position=position, leave=False):\n _gradient_single_point(u, i, j, user_mat, prod_mat, lambda_reg, alpha)\n\n yield user_mat, prod_mat\n\n\ndef optimizeMF(ratings, rank, num_iter=10, num_neg_samples=30):\n \"\"\" Provides a spark-facing non-ditributed version of BPR\n\n Args:\n -----\n ratings: an rdd of (user, item) pairs\n num_iter: number of iterations\n num_neg_samples: how many negative samples to take\n\n Returns:\n --------\n (user_mat, prod_mat)\n \"\"\"\n\n ratings_partitioned = ratings.partitionBy(4).persist()\n\n num_users = ratings_partitioned.map(lambda x: x[0]).max()\n num_prods = ratings_partitioned.map(lambda x: x[1]).max()\n\n user_mat = np.random.uniform(size=(num_users + 1, rank))\n prod_mat = np.random.uniform(size=(num_prods + 1, rank))\n\n for _ in xrange(num_iter):\n\n result = ratings_partitioned.flatMap(\n lambda x: [x] * num_neg_samples\n ).map(\n lambda x: x[:2] + (np.random.randint(num_prods) + 1, )\n ).mapPartitionsWithIndex(\n lambda ix, ratings: _sample_optimize_partition(\n ratings, user_mat, prod_mat, num_prods, position=ix\n )\n ).persist()\n\n num = float(result.count())\n\n user_mat, prod_mat = result.reduce(\n lambda a, b: (a[0] + b[0], a[1] + b[1]))\n\n user_mat /= num\n prod_mat /= num\n\n return (user_mat, prod_mat)\n" ]
[ [ "numpy.random.uniform", "numpy.exp", "numpy.random.randint" ] ]
pierreablin/smica
[ "f56d4fac065f88788a0682e68f0121902601f161" ]
[ "smica/core_smican.py" ]
[ "import numpy as np\n\nfrom sklearn.utils import check_random_state\n\nfrom .core_fitter import CovarianceFitNoise\nfrom .utils import fourier_sampling, itakura\n\neps = 1e-12\n\n\ndef wiener(A, powers, noise_inv):\n '''\n The Wiener filter\n '''\n C = np.linalg.pinv(A.T.dot(noise_inv.dot(A)) +\n np.diag(1. / (powers + eps)))\n return C.dot(A.T.dot(noise_inv))\n\n\nclass SMICAN(object):\n '''\n Core smican procedure: transform in the frequency domain, etc..\n '''\n def __init__(self, X_noise, n_components, freqs, sfreq, rng=None):\n '''\n n_components : number of sources\n freqs : the frequency intervals\n sfreq : sampling frequency\n '''\n self.n_components = n_components\n self.freqs = freqs\n self.sfreq = sfreq\n self.f_scale = 0.5 * (freqs[1:] + freqs[:-1])\n if len(X_noise.shape) == 2:\n C, _, _ = fourier_sampling(X_noise, self.sfreq, self.freqs)\n else:\n n_mat, p, _ = X_noise.shape\n C = np.zeros((len(freqs) - 1, p, p))\n for X in X_noise:\n C += fourier_sampling(X, self.sfreq, self.freqs)[0]\n C /= n_mat\n self.C_noise = C\n self.C_noise_inv = np.array([np.linalg.inv(c) for c in C])\n self.rng = check_random_state(rng)\n\n def fit(self, X, y=None, **kwargs):\n '''\n Fits smica to data X (p x n matrix sampled at fs)\n '''\n self.X = X.copy()\n C, ft, freq_idx = fourier_sampling(X, self.sfreq, self.freqs)\n self.C_ = C\n self.ft_ = ft\n self.freq_idx_ = freq_idx\n covfit = CovarianceFitNoise(self.C_noise, self.n_components,\n self.rng)\n covfit.fit(C, **kwargs)\n self.A_ = covfit.A_\n self.powers_ = covfit.powers_\n return self\n\n def is_matrix(self):\n IS = np.zeros((self.n_components, self.n_components))\n for i in range(self.n_components):\n for j in range(self.n_components):\n IS[i, j] = itakura(self.powers_[:, i], self.powers_[:, j])\n self.IS_ = IS\n return IS\n\n # def compute_approx_covs(self):\n # '''\n # Compute the covariances estimated by the model\n # '''\n # covs_approx = compute_covariances(self.A_, self.powers_,\n # self.C_noise)\n # return covs_approx\n\n def compute_f_div(self, halve=False):\n f = np.zeros((self.n_components, self.n_components))\n for i in range(self.n_components):\n for j in range(self.n_components):\n p1 = self.powers_[:, i]\n p2 = self.powers_[:, j]\n frac = p1 / p2\n if halve:\n frac = frac[:len(frac) // 2]\n f[i, j] = np.mean(frac) * np.mean(1. / frac) - 1.\n self.f_div = f\n return f\n\n def compute_sources(self, X=None, method='wiener'):\n if method == 'wiener':\n if X is None:\n ft = self.ft_\n freq_idx = self.freq_idx_\n else:\n _, ft, freq_idx = fourier_sampling(X, self.sfreq, self.freqs)\n p, n = ft.shape\n ft_sources = 1j * np.zeros((self.n_components, n))\n for j, (C_i, power) in enumerate(zip(self.C_noise_inv,\n self.powers_)):\n sl = np.arange(freq_idx[j], freq_idx[j+1])\n W = wiener(self.A_, power, C_i)\n transf = np.dot(W, ft[:, sl])\n ft_sources[:, sl] = transf\n ft_sources[:, n - sl] = np.conj(transf)\n return np.real(np.fft.ifft(ft_sources))\n elif method == 'pinv':\n if X is None:\n X = self.X\n return np.linalg.pinv(self.A_).dot(X)\n\n def filter(self, X=None, bad_sources=[], method='wiener'):\n S = self.compute_sources(X, method=method)\n S[bad_sources] = 0.\n return np.dot(self.A_, S)\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.conj", "numpy.linalg.inv", "numpy.arange", "numpy.fft.ifft", "numpy.linalg.pinv", "numpy.mean", "numpy.zeros", "sklearn.utils.check_random_state" ] ]
xuyu0010/ARID_v1
[ "b03d0975f41547e8aa78929b8e26a62248f8e18f" ]
[ "network/inception_v1_i3d.py" ]
[ "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\n# from mmcv.cnn import constant_init, kaiming_init\n# from mmcv.runner import load_checkpoint\n\n# from ...registry import BACKBONES\n# from mmaction.ops.reflection_pad3d import reflection_pad3d\n\ntry:\n from . import initializer\n from .utils import load_state\n from .utils import SimpleSpatialTemporalModule, ClsHead\nexcept: \n import initializer\n from utils import load_state\n from utils import SimpleSpatialTemporalModule, ClsHead\n\n\n__all__ = ['InceptionV1_I3D']\n\n# @BACKBONES.register_module\nclass InceptionV1_I3D(nn.Module):\n\n ## TODO:\n ## Refactor it into a more modular way\n ## Reference: Table 1 from https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf\n\n def __init__(self,\n pretrained=None,\n bn_eval=True,\n bn_frozen=False,\n partial_bn=False,\n modality='RGB',\n num_classes=11):\n super(InceptionV1_I3D, self).__init__()\n\n self.pretrained = pretrained\n self.bn_eval = bn_eval\n self.bn_frozen = bn_frozen\n self.partial_bn = partial_bn\n self.modality = modality\n\n inplace = True\n assert modality in ['RGB', 'Flow']\n if modality == 'RGB':\n self.conv1_7x7_s2 = nn.Conv3d(3, 64, kernel_size=(7, 7, 7), stride=(2, 2, 2), padding=(0, 0, 0), bias=False)\n else:\n self.conv1_7x7_s2 = nn.Conv3d(2, 64, kernel_size=(7, 7, 7), stride=(2, 2, 2), padding=(0, 0, 0), bias=False)\n self.conv1_7x7_s2_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.conv1_relu_7x7 = nn.ReLU(inplace)\n self.pool1_3x3_s2 = nn.MaxPool3d((1, 3, 3), stride=(1, 2, 2), dilation=(1, 1, 1), ceil_mode=True)\n self.conv2_3x3_reduce = nn.Conv3d(64, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.conv2_3x3_reduce_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.conv2_relu_3x3_reduce = nn.ReLU(inplace)\n self.conv2_3x3 = nn.Conv3d(64, 192, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.conv2_3x3_bn = nn.BatchNorm3d(192, eps=1e-05, affine=True)\n self.conv2_relu_3x3 = nn.ReLU(inplace)\n self.pool2_3x3_s2 = nn.MaxPool3d((1, 3, 3), stride=(1, 2, 2), dilation=(1, 1, 1), ceil_mode=True)\n\n ##########\n self.inception_3a_1x1 = nn.Conv3d(192, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3a_1x1_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_3a_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_3a_branch1_3x3_reduce = nn.Conv3d(192, 96, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3a_branch1_3x3_reduce_bn = nn.BatchNorm3d(96, eps=1e-05, affine=True)\n self.inception_3a_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_3a_branch1_3x3 = nn.Conv3d(96, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_3a_branch1_3x3_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_3a_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_3a_branch2_3x3_reduce = nn.Conv3d(192, 16, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3a_branch2_3x3_reduce_bn = nn.BatchNorm3d(16, eps=1e-05, affine=True)\n self.inception_3a_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_3a_branch2_3x3 = nn.Conv3d(16, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_3a_branch2_3x3_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_3a_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_3a_pool = nn.MaxPool3d(3, stride=1, 
padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_3a_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_3a_pool_proj = nn.Conv3d(192, 32, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3a_pool_proj_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_3a_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_3b_1x1 = nn.Conv3d(256, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3b_1x1_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_3b_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_3b_branch1_3x3_reduce = nn.Conv3d(256, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3b_branch1_3x3_reduce_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_3b_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_3b_branch1_3x3 = nn.Conv3d(128, 192, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_3b_branch1_3x3_bn = nn.BatchNorm3d(192, eps=1e-05, affine=True)\n self.inception_3b_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_3b_branch2_3x3_reduce = nn.Conv3d(256, 32, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3b_branch2_3x3_reduce_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_3b_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_3b_branch2_3x3 = nn.Conv3d(32, 96, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_3b_branch2_3x3_bn = nn.BatchNorm3d(96, eps=1e-05, affine=True)\n self.inception_3b_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_3b_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_3b_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_3b_pool_proj = nn.Conv3d(256, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_3b_pool_proj_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_3b_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_3c_pool = nn.MaxPool3d((3, 3, 3), stride=(2, 2, 2), dilation=(1, 1, 1), ceil_mode=True)\n\n ##########\n self.inception_4a_1x1 = nn.Conv3d(480, 192, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4a_1x1_bn = nn.BatchNorm3d(192, eps=1e-05, affine=True)\n self.inception_4a_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_4a_branch1_3x3_reduce = nn.Conv3d(480, 96, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4a_branch1_3x3_reduce_bn = nn.BatchNorm3d(96, eps=1e-05, affine=True)\n self.inception_4a_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4a_branch1_3x3 = nn.Conv3d(96, 208, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4a_branch1_3x3_bn = nn.BatchNorm3d(208, eps=1e-05, affine=True)\n self.inception_4a_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_4a_branch2_3x3_reduce = nn.Conv3d(480, 16, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4a_branch2_3x3_reduce_bn = nn.BatchNorm3d(16, eps=1e-05, affine=True)\n self.inception_4a_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4a_branch2_3x3 = nn.Conv3d(16, 48, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4a_branch2_3x3_bn = nn.BatchNorm3d(48, eps=1e-05, affine=True)\n self.inception_4a_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_4a_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, 
count_include_pad=True)\n self.inception_4a_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_4a_pool_proj = nn.Conv3d(480, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4a_pool_proj_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4a_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_4b_1x1 = nn.Conv3d(512, 160, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4b_1x1_bn = nn.BatchNorm3d(160, eps=1e-05, affine=True)\n self.inception_4b_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_4b_branch1_3x3_reduce = nn.Conv3d(512, 112, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4b_branch1_3x3_reduce_bn = nn.BatchNorm3d(112, eps=1e-05, affine=True)\n self.inception_4b_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4b_branch1_3x3 = nn.Conv3d(112, 224, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4b_branch1_3x3_bn = nn.BatchNorm3d(224, eps=1e-05, affine=True)\n self.inception_4b_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_4b_branch2_3x3_reduce = nn.Conv3d(512, 24, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4b_branch2_3x3_reduce_bn = nn.BatchNorm3d(24, eps=1e-05, affine=True)\n self.inception_4b_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4b_branch2_3x3 = nn.Conv3d(24, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4b_branch2_3x3_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4b_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_4b_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_4b_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_4b_pool_proj = nn.Conv3d(512, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4b_pool_proj_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4b_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_4c_1x1 = nn.Conv3d(512, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4c_1x1_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_4c_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_4c_branch1_3x3_reduce = nn.Conv3d(512, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4c_branch1_3x3_reduce_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_4c_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4c_branch1_3x3 = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4c_branch1_3x3_bn = nn.BatchNorm3d(256, eps=1e-05, affine=True)\n self.inception_4c_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_4c_branch2_3x3_reduce = nn.Conv3d(512, 24, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4c_branch2_3x3_reduce_bn = nn.BatchNorm3d(24, eps=1e-05, affine=True)\n self.inception_4c_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4c_branch2_3x3 = nn.Conv3d(24, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4c_branch2_3x3_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4c_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_4c_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_4c_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_4c_pool_proj = nn.Conv3d(512, 
64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4c_pool_proj_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4c_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_4d_1x1 = nn.Conv3d(512, 112, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4d_1x1_bn = nn.BatchNorm3d(112, eps=1e-05, affine=True)\n self.inception_4d_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_4d_branch1_3x3_reduce = nn.Conv3d(512, 144, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4d_branch1_3x3_reduce_bn = nn.BatchNorm3d(144, eps=1e-05, affine=True)\n self.inception_4d_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4d_branch1_3x3 = nn.Conv3d(144, 288, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4d_branch1_3x3_bn = nn.BatchNorm3d(288, eps=1e-05, affine=True)\n self.inception_4d_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_4d_branch2_3x3_reduce = nn.Conv3d(512, 32, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4d_branch2_3x3_reduce_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_4d_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4d_branch2_3x3 = nn.Conv3d(32, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4d_branch2_3x3_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4d_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_4d_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_4d_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_4d_pool_proj = nn.Conv3d(512, 64, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4d_pool_proj_bn = nn.BatchNorm3d(64, eps=1e-05, affine=True)\n self.inception_4d_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_4e_1x1 = nn.Conv3d(528, 256, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4e_1x1_bn = nn.BatchNorm3d(256, eps=1e-05, affine=True)\n self.inception_4e_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_4e_branch1_3x3_reduce = nn.Conv3d(528, 160, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4e_branch1_3x3_reduce_bn = nn.BatchNorm3d(160, eps=1e-05, affine=True)\n self.inception_4e_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4e_branch1_3x3 = nn.Conv3d(160, 320, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4e_branch1_3x3_bn = nn.BatchNorm3d(320, eps=1e-05, affine=True)\n self.inception_4e_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_4e_branch2_3x3_reduce = nn.Conv3d(528, 32, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4e_branch2_3x3_reduce_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_4e_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_4e_branch2_3x3 = nn.Conv3d(32, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_4e_branch2_3x3_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_4e_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_4e_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_4e_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_4e_pool_proj = nn.Conv3d(528, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_4e_pool_proj_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n 
self.inception_4e_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_4f_pool = nn.MaxPool3d((2, 2, 2), stride=(2, 2, 2), dilation=(1, 1, 1), ceil_mode=True)\n\n ##########\n self.inception_5a_1x1 = nn.Conv3d(832, 256, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5a_1x1_bn = nn.BatchNorm3d(256, eps=1e-05, affine=True)\n self.inception_5a_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_5a_branch1_3x3_reduce = nn.Conv3d(832, 160, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5a_branch1_3x3_reduce_bn = nn.BatchNorm3d(160, eps=1e-05, affine=True)\n self.inception_5a_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_5a_branch1_3x3 = nn.Conv3d(160, 320, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_5a_branch1_3x3_bn = nn.BatchNorm3d(320, eps=1e-05, affine=True)\n self.inception_5a_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_5a_branch2_3x3_reduce = nn.Conv3d(832, 32, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5a_branch2_3x3_reduce_bn = nn.BatchNorm3d(32, eps=1e-05, affine=True)\n self.inception_5a_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_5a_branch2_3x3 = nn.Conv3d(32, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_5a_branch2_3x3_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_5a_branch2_relu_3x3 = nn.ReLU(inplace)\n\n # self.inception_5a_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)\n self.inception_5a_pool = nn.MaxPool3d(3, stride=1, padding=1, ceil_mode=True)\n self.inception_5a_pool_proj = nn.Conv3d(832, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5a_pool_proj_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_5a_relu_pool_proj = nn.ReLU(inplace)\n\n self.inception_5b_1x1 = nn.Conv3d(832, 384, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5b_1x1_bn = nn.BatchNorm3d(384, eps=1e-05, affine=True)\n self.inception_5b_relu_1x1 = nn.ReLU(inplace)\n\n self.inception_5b_branch1_3x3_reduce = nn.Conv3d(832, 192, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5b_branch1_3x3_reduce_bn = nn.BatchNorm3d(192, eps=1e-05, affine=True)\n self.inception_5b_branch1_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_5b_branch1_3x3 = nn.Conv3d(192, 384, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_5b_branch1_3x3_bn = nn.BatchNorm3d(384, eps=1e-05, affine=True)\n self.inception_5b_branch1_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_5b_branch2_3x3_reduce = nn.Conv3d(832, 48, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5b_branch2_3x3_reduce_bn = nn.BatchNorm3d(48, eps=1e-05, affine=True)\n self.inception_5b_branch2_relu_3x3_reduce = nn.ReLU(inplace)\n self.inception_5b_branch2_3x3 = nn.Conv3d(48, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)\n self.inception_5b_branch2_3x3_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_5b_branch2_relu_3x3 = nn.ReLU(inplace)\n\n self.inception_5b_pool = nn.MaxPool3d(3, stride=1, padding=1, dilation=1, ceil_mode=True)\n self.inception_5b_pool_proj = nn.Conv3d(832, 128, kernel_size=(1, 1, 1), stride=(1, 1, 1), bias=False)\n self.inception_5b_pool_proj_bn = nn.BatchNorm3d(128, eps=1e-05, affine=True)\n self.inception_5b_relu_pool_proj = nn.ReLU(inplace)\n self.avgpool = SimpleSpatialTemporalModule(spatial_type='avg', 
temporal_size=2, spatial_size=7)\n self.cls_head = ClsHead(with_avg_pool=False, temporal_feature_size=1, spatial_feature_size=1,dropout_ratio=0.5,\n in_channels=1024,num_classes=num_classes)\n\n\n # def init_weights(self):\n # if isinstance(self.pretrained, str):\n # logger = logging.getLogger()\n # load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n # elif self.pretrained is None:\n # for m in self.modules():\n # if isinstance(m, nn.Conv3d):\n # kaiming_init(m)\n # elif isinstance(m, nn.BatchNorm3d):\n # constant_init(m, 1)\n\n ## Initialization\n initializer.xavier(net=self)\n\n if pretrained:\n pretrained_model=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/i3d_kinetics_rgb_inception_v1.pth')\n logging.info(\"Network:: graph initialized, loading pretrained model: `{}'\".format(pretrained_model))\n assert os.path.exists(pretrained_model), \"cannot locate: `{}'\".format(pretrained_model)\n pretrained = torch.load(pretrained_model)\n load_state(self, pretrained)\n else:\n logging.info(\"Network:: graph initialized, use random inilization!\")\n\n\n def forward(self, input):\n conv1_7x7_s2_out = self.conv1_7x7_s2(F.pad(input, (2, 4, 2, 4, 2, 4)))\n conv1_7x7_s2_bn_out = self.conv1_7x7_s2_bn(conv1_7x7_s2_out)\n conv1_relu_7x7_out = self.conv1_relu_7x7(conv1_7x7_s2_bn_out)\n pool1_3x3_s2_out = self.pool1_3x3_s2(conv1_7x7_s2_bn_out)\n conv2_3x3_reduce_out = self.conv2_3x3_reduce(pool1_3x3_s2_out)\n conv2_3x3_reduce_bn_out = self.conv2_3x3_reduce_bn(conv2_3x3_reduce_out)\n conv2_relu_3x3_reduce_out = self.conv2_relu_3x3_reduce(conv2_3x3_reduce_bn_out)\n conv2_3x3_out = self.conv2_3x3(conv2_3x3_reduce_bn_out)\n conv2_3x3_bn_out = self.conv2_3x3_bn(conv2_3x3_out)\n conv2_relu_3x3_out = self.conv2_relu_3x3(conv2_3x3_bn_out)\n pool2_3x3_s2_out = self.pool2_3x3_s2(conv2_3x3_bn_out)\n\n inception_3a_1x1_out = self.inception_3a_1x1(pool2_3x3_s2_out)\n inception_3a_1x1_bn_out = self.inception_3a_1x1_bn(inception_3a_1x1_out)\n inception_3a_relu_1x1_out = self.inception_3a_relu_1x1(inception_3a_1x1_bn_out)\n inception_3a_branch1_3x3_reduce_out = self.inception_3a_branch1_3x3_reduce(pool2_3x3_s2_out)\n inception_3a_branch1_3x3_reduce_bn_out = self.inception_3a_branch1_3x3_reduce_bn(inception_3a_branch1_3x3_reduce_out)\n inception_3a_branch1_relu_3x3_reduce_out = self.inception_3a_branch1_relu_3x3_reduce(inception_3a_branch1_3x3_reduce_bn_out)\n inception_3a_branch1_3x3_out = self.inception_3a_branch1_3x3(inception_3a_branch1_3x3_reduce_bn_out)\n inception_3a_branch1_3x3_bn_out = self.inception_3a_branch1_3x3_bn(inception_3a_branch1_3x3_out)\n inception_3a_branch1_relu_3x3_out = self.inception_3a_branch1_relu_3x3(inception_3a_branch1_3x3_bn_out)\n inception_3a_branch2_3x3_reduce_out = self.inception_3a_branch2_3x3_reduce(pool2_3x3_s2_out)\n inception_3a_branch2_3x3_reduce_bn_out = self.inception_3a_branch2_3x3_reduce_bn(inception_3a_branch2_3x3_reduce_out)\n inception_3a_branch2_relu_3x3_reduce_out = self.inception_3a_branch2_relu_3x3_reduce(inception_3a_branch2_3x3_reduce_bn_out)\n inception_3a_branch2_3x3_out = self.inception_3a_branch2_3x3(inception_3a_branch2_3x3_reduce_bn_out)\n inception_3a_branch2_3x3_bn_out = self.inception_3a_branch2_3x3_bn(inception_3a_branch2_3x3_out)\n inception_3a_branch2_relu_3x3_out = self.inception_3a_branch2_relu_3x3(inception_3a_branch2_3x3_bn_out)\n inception_3a_pool_out = self.inception_3a_pool(pool2_3x3_s2_out)\n inception_3a_pool_proj_out = self.inception_3a_pool_proj(inception_3a_pool_out)\n inception_3a_pool_proj_bn_out = 
self.inception_3a_pool_proj_bn(inception_3a_pool_proj_out)\n inception_3a_relu_pool_proj_out = self.inception_3a_relu_pool_proj(inception_3a_pool_proj_bn_out)\n inception_3a_output_out = torch.cat([inception_3a_1x1_bn_out,inception_3a_branch1_3x3_bn_out,inception_3a_branch2_3x3_bn_out,inception_3a_pool_proj_bn_out], 1)\n\n\n inception_3b_1x1_out = self.inception_3b_1x1(inception_3a_output_out)\n inception_3b_1x1_bn_out = self.inception_3b_1x1_bn(inception_3b_1x1_out)\n inception_3b_relu_1x1_out = self.inception_3b_relu_1x1(inception_3b_1x1_bn_out)\n inception_3b_branch1_3x3_reduce_out = self.inception_3b_branch1_3x3_reduce(inception_3a_output_out)\n inception_3b_branch1_3x3_reduce_bn_out = self.inception_3b_branch1_3x3_reduce_bn(inception_3b_branch1_3x3_reduce_out)\n inception_3b_branch1_relu_3x3_reduce_out = self.inception_3b_branch1_relu_3x3_reduce(inception_3b_branch1_3x3_reduce_bn_out)\n inception_3b_branch1_3x3_out = self.inception_3b_branch1_3x3(inception_3b_branch1_3x3_reduce_bn_out)\n inception_3b_branch1_3x3_bn_out = self.inception_3b_branch1_3x3_bn(inception_3b_branch1_3x3_out)\n inception_3b_branch1_relu_3x3_out = self.inception_3b_branch1_relu_3x3(inception_3b_branch1_3x3_bn_out)\n inception_3b_branch2_3x3_reduce_out = self.inception_3b_branch2_3x3_reduce(inception_3a_output_out)\n inception_3b_branch2_3x3_reduce_bn_out = self.inception_3b_branch2_3x3_reduce_bn(inception_3b_branch2_3x3_reduce_out)\n inception_3b_branch2_relu_3x3_reduce_out = self.inception_3b_branch2_relu_3x3_reduce(inception_3b_branch2_3x3_reduce_bn_out)\n inception_3b_branch2_3x3_out = self.inception_3b_branch2_3x3(inception_3b_branch2_3x3_reduce_bn_out)\n inception_3b_branch2_3x3_bn_out = self.inception_3b_branch2_3x3_bn(inception_3b_branch2_3x3_out)\n inception_3b_branch2_relu_3x3_out = self.inception_3b_branch2_relu_3x3(inception_3b_branch2_3x3_bn_out)\n inception_3b_pool_out = self.inception_3b_pool(inception_3a_output_out)\n inception_3b_pool_proj_out = self.inception_3b_pool_proj(inception_3b_pool_out)\n inception_3b_pool_proj_bn_out = self.inception_3b_pool_proj_bn(inception_3b_pool_proj_out)\n inception_3b_relu_pool_proj_out = self.inception_3b_relu_pool_proj(inception_3b_pool_proj_bn_out)\n inception_3b_output_out = torch.cat([inception_3b_1x1_bn_out,inception_3b_branch1_3x3_bn_out,inception_3b_branch2_3x3_bn_out,inception_3b_pool_proj_bn_out], 1)\n\n\n inception_3c_pool_out = self.inception_3c_pool(inception_3b_output_out)\n\n inception_4a_1x1_out = self.inception_4a_1x1(inception_3c_pool_out)\n inception_4a_1x1_bn_out = self.inception_4a_1x1_bn(inception_4a_1x1_out)\n inception_4a_relu_1x1_out = self.inception_4a_relu_1x1(inception_4a_1x1_bn_out)\n inception_4a_branch1_3x3_reduce_out = self.inception_4a_branch1_3x3_reduce(inception_3c_pool_out)\n inception_4a_branch1_3x3_reduce_bn_out = self.inception_4a_branch1_3x3_reduce_bn(inception_4a_branch1_3x3_reduce_out)\n inception_4a_branch1_relu_3x3_reduce_out = self.inception_4a_branch1_relu_3x3_reduce(inception_4a_branch1_3x3_reduce_bn_out)\n inception_4a_branch1_3x3_out = self.inception_4a_branch1_3x3(inception_4a_branch1_3x3_reduce_bn_out)\n inception_4a_branch1_3x3_bn_out = self.inception_4a_branch1_3x3_bn(inception_4a_branch1_3x3_out)\n inception_4a_branch1_relu_3x3_out = self.inception_4a_branch1_relu_3x3(inception_4a_branch1_3x3_bn_out)\n inception_4a_branch2_3x3_reduce_out = self.inception_4a_branch2_3x3_reduce(inception_3c_pool_out)\n inception_4a_branch2_3x3_reduce_bn_out = 
self.inception_4a_branch2_3x3_reduce_bn(inception_4a_branch2_3x3_reduce_out)\n inception_4a_branch2_relu_3x3_reduce_out = self.inception_4a_branch2_relu_3x3_reduce(inception_4a_branch2_3x3_reduce_bn_out)\n inception_4a_branch2_3x3_out = self.inception_4a_branch2_3x3(inception_4a_branch2_3x3_reduce_bn_out)\n inception_4a_branch2_3x3_bn_out = self.inception_4a_branch2_3x3_bn(inception_4a_branch2_3x3_out)\n inception_4a_branch2_relu_3x3_out = self.inception_4a_branch2_relu_3x3(inception_4a_branch2_3x3_bn_out)\n inception_4a_pool_out = self.inception_4a_pool(inception_3c_pool_out)\n inception_4a_pool_proj_out = self.inception_4a_pool_proj(inception_4a_pool_out)\n inception_4a_pool_proj_bn_out = self.inception_4a_pool_proj_bn(inception_4a_pool_proj_out)\n inception_4a_relu_pool_proj_out = self.inception_4a_relu_pool_proj(inception_4a_pool_proj_bn_out)\n inception_4a_output_out = torch.cat([inception_4a_1x1_bn_out,inception_4a_branch1_3x3_bn_out,inception_4a_branch2_3x3_bn_out,inception_4a_pool_proj_bn_out], 1)\n\n inception_4b_1x1_out = self.inception_4b_1x1(inception_4a_output_out)\n inception_4b_1x1_bn_out = self.inception_4b_1x1_bn(inception_4b_1x1_out)\n inception_4b_relu_1x1_out = self.inception_4b_relu_1x1(inception_4b_1x1_bn_out)\n inception_4b_branch1_3x3_reduce_out = self.inception_4b_branch1_3x3_reduce(inception_4a_output_out)\n inception_4b_branch1_3x3_reduce_bn_out = self.inception_4b_branch1_3x3_reduce_bn(inception_4b_branch1_3x3_reduce_out)\n inception_4b_branch1_relu_3x3_reduce_out = self.inception_4b_branch1_relu_3x3_reduce(inception_4b_branch1_3x3_reduce_bn_out)\n inception_4b_branch1_3x3_out = self.inception_4b_branch1_3x3(inception_4b_branch1_3x3_reduce_bn_out)\n inception_4b_branch1_3x3_bn_out = self.inception_4b_branch1_3x3_bn(inception_4b_branch1_3x3_out)\n inception_4b_branch1_relu_3x3_out = self.inception_4b_branch1_relu_3x3(inception_4b_branch1_3x3_bn_out)\n inception_4b_branch2_3x3_reduce_out = self.inception_4b_branch2_3x3_reduce(inception_4a_output_out)\n inception_4b_branch2_3x3_reduce_bn_out = self.inception_4b_branch2_3x3_reduce_bn(inception_4b_branch2_3x3_reduce_out)\n inception_4b_branch2_relu_3x3_reduce_out = self.inception_4b_branch2_relu_3x3_reduce(inception_4b_branch2_3x3_reduce_bn_out)\n inception_4b_branch2_3x3_out = self.inception_4b_branch2_3x3(inception_4b_branch2_3x3_reduce_bn_out)\n inception_4b_branch2_3x3_bn_out = self.inception_4b_branch2_3x3_bn(inception_4b_branch2_3x3_out)\n inception_4b_branch2_relu_3x3_out = self.inception_4b_branch2_relu_3x3(inception_4b_branch2_3x3_bn_out)\n inception_4b_pool_out = self.inception_4b_pool(inception_4a_output_out)\n inception_4b_pool_proj_out = self.inception_4b_pool_proj(inception_4b_pool_out)\n inception_4b_pool_proj_bn_out = self.inception_4b_pool_proj_bn(inception_4b_pool_proj_out)\n inception_4b_relu_pool_proj_out = self.inception_4b_relu_pool_proj(inception_4b_pool_proj_bn_out)\n inception_4b_output_out = torch.cat([inception_4b_1x1_bn_out,inception_4b_branch1_3x3_bn_out,inception_4b_branch2_3x3_bn_out,inception_4b_pool_proj_bn_out], 1)\n\n inception_4c_1x1_out = self.inception_4c_1x1(inception_4b_output_out)\n inception_4c_1x1_bn_out = self.inception_4c_1x1_bn(inception_4c_1x1_out)\n inception_4c_relu_1x1_out = self.inception_4c_relu_1x1(inception_4c_1x1_bn_out)\n inception_4c_branch1_3x3_reduce_out = self.inception_4c_branch1_3x3_reduce(inception_4b_output_out)\n inception_4c_branch1_3x3_reduce_bn_out = self.inception_4c_branch1_3x3_reduce_bn(inception_4c_branch1_3x3_reduce_out)\n 
inception_4c_branch1_relu_3x3_reduce_out = self.inception_4c_branch1_relu_3x3_reduce(inception_4c_branch1_3x3_reduce_bn_out)\n inception_4c_branch1_3x3_out = self.inception_4c_branch1_3x3(inception_4c_branch1_3x3_reduce_bn_out)\n inception_4c_branch1_3x3_bn_out = self.inception_4c_branch1_3x3_bn(inception_4c_branch1_3x3_out)\n inception_4c_branch1_relu_3x3_out = self.inception_4c_branch1_relu_3x3(inception_4c_branch1_3x3_bn_out)\n inception_4c_branch2_3x3_reduce_out = self.inception_4c_branch2_3x3_reduce(inception_4b_output_out)\n inception_4c_branch2_3x3_reduce_bn_out = self.inception_4c_branch2_3x3_reduce_bn(inception_4c_branch2_3x3_reduce_out)\n inception_4c_branch2_relu_3x3_reduce_out = self.inception_4c_branch2_relu_3x3_reduce(inception_4c_branch2_3x3_reduce_bn_out)\n inception_4c_branch2_3x3_out = self.inception_4c_branch2_3x3(inception_4c_branch2_3x3_reduce_bn_out)\n inception_4c_branch2_3x3_bn_out = self.inception_4c_branch2_3x3_bn(inception_4c_branch2_3x3_out)\n inception_4c_branch2_relu_3x3_out = self.inception_4c_branch2_relu_3x3(inception_4c_branch2_3x3_bn_out)\n inception_4c_pool_out = self.inception_4c_pool(inception_4b_output_out)\n inception_4c_pool_proj_out = self.inception_4c_pool_proj(inception_4c_pool_out)\n inception_4c_pool_proj_bn_out = self.inception_4c_pool_proj_bn(inception_4c_pool_proj_out)\n inception_4c_relu_pool_proj_out = self.inception_4c_relu_pool_proj(inception_4c_pool_proj_bn_out)\n inception_4c_output_out = torch.cat([inception_4c_1x1_bn_out,inception_4c_branch1_3x3_bn_out,inception_4c_branch2_3x3_bn_out,inception_4c_pool_proj_bn_out], 1)\n\n inception_4d_1x1_out = self.inception_4d_1x1(inception_4c_output_out)\n inception_4d_1x1_bn_out = self.inception_4d_1x1_bn(inception_4d_1x1_out)\n inception_4d_relu_1x1_out = self.inception_4d_relu_1x1(inception_4d_1x1_bn_out)\n inception_4d_branch1_3x3_reduce_out = self.inception_4d_branch1_3x3_reduce(inception_4c_output_out)\n inception_4d_branch1_3x3_reduce_bn_out = self.inception_4d_branch1_3x3_reduce_bn(inception_4d_branch1_3x3_reduce_out)\n inception_4d_branch1_relu_3x3_reduce_out = self.inception_4d_branch1_relu_3x3_reduce(inception_4d_branch1_3x3_reduce_bn_out)\n inception_4d_branch1_3x3_out = self.inception_4d_branch1_3x3(inception_4d_branch1_3x3_reduce_bn_out)\n inception_4d_branch1_3x3_bn_out = self.inception_4d_branch1_3x3_bn(inception_4d_branch1_3x3_out)\n inception_4d_branch1_relu_3x3_out = self.inception_4d_branch1_relu_3x3(inception_4d_branch1_3x3_bn_out)\n inception_4d_branch2_3x3_reduce_out = self.inception_4d_branch2_3x3_reduce(inception_4c_output_out)\n inception_4d_branch2_3x3_reduce_bn_out = self.inception_4d_branch2_3x3_reduce_bn(inception_4d_branch2_3x3_reduce_out)\n inception_4d_branch2_relu_3x3_reduce_out = self.inception_4d_branch2_relu_3x3_reduce(inception_4d_branch2_3x3_reduce_bn_out)\n inception_4d_branch2_3x3_out = self.inception_4d_branch2_3x3(inception_4d_branch2_3x3_reduce_bn_out)\n inception_4d_branch2_3x3_bn_out = self.inception_4d_branch2_3x3_bn(inception_4d_branch2_3x3_out)\n inception_4d_branch2_relu_3x3_out = self.inception_4d_branch2_relu_3x3(inception_4d_branch2_3x3_bn_out)\n inception_4d_pool_out = self.inception_4d_pool(inception_4c_output_out)\n inception_4d_pool_proj_out = self.inception_4d_pool_proj(inception_4d_pool_out)\n inception_4d_pool_proj_bn_out = self.inception_4d_pool_proj_bn(inception_4d_pool_proj_out)\n inception_4d_relu_pool_proj_out = self.inception_4d_relu_pool_proj(inception_4d_pool_proj_bn_out)\n inception_4d_output_out = 
torch.cat([inception_4d_1x1_bn_out,inception_4d_branch1_3x3_bn_out,inception_4d_branch2_3x3_bn_out,inception_4d_pool_proj_bn_out], 1)\n\n inception_4e_1x1_out = self.inception_4e_1x1(inception_4d_output_out)\n inception_4e_1x1_bn_out = self.inception_4e_1x1_bn(inception_4e_1x1_out)\n inception_4e_relu_1x1_out = self.inception_4e_relu_1x1(inception_4e_1x1_bn_out)\n inception_4e_branch1_3x3_reduce_out = self.inception_4e_branch1_3x3_reduce(inception_4d_output_out)\n inception_4e_branch1_3x3_reduce_bn_out = self.inception_4e_branch1_3x3_reduce_bn(inception_4e_branch1_3x3_reduce_out)\n inception_4e_branch1_relu_3x3_reduce_out = self.inception_4e_branch1_relu_3x3_reduce(inception_4e_branch1_3x3_reduce_bn_out)\n inception_4e_branch1_3x3_out = self.inception_4e_branch1_3x3(inception_4e_branch1_3x3_reduce_bn_out)\n inception_4e_branch1_3x3_bn_out = self.inception_4e_branch1_3x3_bn(inception_4e_branch1_3x3_out)\n inception_4e_branch1_relu_3x3_out = self.inception_4e_branch1_relu_3x3(inception_4e_branch1_3x3_bn_out)\n inception_4e_branch2_3x3_reduce_out = self.inception_4e_branch2_3x3_reduce(inception_4d_output_out)\n inception_4e_branch2_3x3_reduce_bn_out = self.inception_4e_branch2_3x3_reduce_bn(inception_4e_branch2_3x3_reduce_out)\n inception_4e_branch2_relu_3x3_reduce_out = self.inception_4e_branch2_relu_3x3_reduce(inception_4e_branch2_3x3_reduce_bn_out)\n inception_4e_branch2_3x3_out = self.inception_4e_branch2_3x3(inception_4e_branch2_3x3_reduce_bn_out)\n inception_4e_branch2_3x3_bn_out = self.inception_4e_branch2_3x3_bn(inception_4e_branch2_3x3_out)\n inception_4e_branch2_relu_3x3_out = self.inception_4e_branch2_relu_3x3(inception_4e_branch2_3x3_bn_out)\n inception_4e_pool_out = self.inception_4e_pool(inception_4d_output_out)\n inception_4e_pool_proj_out = self.inception_4e_pool_proj(inception_4e_pool_out)\n inception_4e_pool_proj_bn_out = self.inception_4e_pool_proj_bn(inception_4e_pool_proj_out)\n inception_4e_relu_pool_proj_out = self.inception_4e_relu_pool_proj(inception_4e_pool_proj_bn_out)\n inception_4e_output_out = torch.cat([inception_4e_1x1_bn_out,inception_4e_branch1_3x3_bn_out,inception_4e_branch2_3x3_bn_out,inception_4e_pool_proj_bn_out], 1)\n\n inception_4f_pool_out = self.inception_4f_pool(inception_4e_output_out)\n\n inception_5a_1x1_out = self.inception_5a_1x1(inception_4f_pool_out)\n inception_5a_1x1_bn_out = self.inception_5a_1x1_bn(inception_5a_1x1_out)\n inception_5a_relu_1x1_out = self.inception_5a_relu_1x1(inception_5a_1x1_bn_out)\n inception_5a_branch1_3x3_reduce_out = self.inception_5a_branch1_3x3_reduce(inception_4f_pool_out)\n inception_5a_branch1_3x3_reduce_bn_out = self.inception_5a_branch1_3x3_reduce_bn(inception_5a_branch1_3x3_reduce_out)\n inception_5a_branch1_relu_3x3_reduce_out = self.inception_5a_branch1_relu_3x3_reduce(inception_5a_branch1_3x3_reduce_bn_out)\n inception_5a_branch1_3x3_out = self.inception_5a_branch1_3x3(inception_5a_branch1_3x3_reduce_bn_out)\n inception_5a_branch1_3x3_bn_out = self.inception_5a_branch1_3x3_bn(inception_5a_branch1_3x3_out)\n inception_5a_branch1_relu_3x3_out = self.inception_5a_branch1_relu_3x3(inception_5a_branch1_3x3_bn_out)\n inception_5a_branch2_3x3_reduce_out = self.inception_5a_branch2_3x3_reduce(inception_4f_pool_out)\n inception_5a_branch2_3x3_reduce_bn_out = self.inception_5a_branch2_3x3_reduce_bn(inception_5a_branch2_3x3_reduce_out)\n inception_5a_branch2_relu_3x3_reduce_out = self.inception_5a_branch2_relu_3x3_reduce(inception_5a_branch2_3x3_reduce_bn_out)\n inception_5a_branch2_3x3_out = 
self.inception_5a_branch2_3x3(inception_5a_branch2_3x3_reduce_bn_out)\n inception_5a_branch2_3x3_bn_out = self.inception_5a_branch2_3x3_bn(inception_5a_branch2_3x3_out)\n inception_5a_branch2_relu_3x3_out = self.inception_5a_branch2_relu_3x3(inception_5a_branch2_3x3_bn_out)\n inception_5a_pool_out = self.inception_5a_pool(inception_4f_pool_out)\n inception_5a_pool_proj_out = self.inception_5a_pool_proj(inception_5a_pool_out)\n inception_5a_pool_proj_bn_out = self.inception_5a_pool_proj_bn(inception_5a_pool_proj_out)\n inception_5a_relu_pool_proj_out = self.inception_5a_relu_pool_proj(inception_5a_pool_proj_bn_out)\n inception_5a_output_out = torch.cat([inception_5a_1x1_bn_out,inception_5a_branch1_3x3_bn_out,inception_5a_branch2_3x3_bn_out,inception_5a_pool_proj_bn_out], 1)\n\n inception_5b_1x1_out = self.inception_5b_1x1(inception_5a_output_out)\n inception_5b_1x1_bn_out = self.inception_5b_1x1_bn(inception_5b_1x1_out)\n inception_5b_relu_1x1_out = self.inception_5b_relu_1x1(inception_5b_1x1_bn_out)\n inception_5b_branch1_3x3_reduce_out = self.inception_5b_branch1_3x3_reduce(inception_5a_output_out)\n inception_5b_branch1_3x3_reduce_bn_out = self.inception_5b_branch1_3x3_reduce_bn(inception_5b_branch1_3x3_reduce_out)\n inception_5b_branch1_relu_3x3_reduce_out = self.inception_5b_branch1_relu_3x3_reduce(inception_5b_branch1_3x3_reduce_bn_out)\n inception_5b_branch1_3x3_out = self.inception_5b_branch1_3x3(inception_5b_branch1_3x3_reduce_bn_out)\n inception_5b_branch1_3x3_bn_out = self.inception_5b_branch1_3x3_bn(inception_5b_branch1_3x3_out)\n inception_5b_branch1_relu_3x3_out = self.inception_5b_branch1_relu_3x3(inception_5b_branch1_3x3_bn_out)\n inception_5b_branch2_3x3_reduce_out = self.inception_5b_branch2_3x3_reduce(inception_5a_output_out)\n inception_5b_branch2_3x3_reduce_bn_out = self.inception_5b_branch2_3x3_reduce_bn(inception_5b_branch2_3x3_reduce_out)\n inception_5b_branch2_relu_3x3_reduce_out = self.inception_5b_branch2_relu_3x3_reduce(inception_5b_branch2_3x3_reduce_bn_out)\n inception_5b_branch2_3x3_out = self.inception_5b_branch2_3x3(inception_5b_branch2_3x3_reduce_bn_out)\n inception_5b_branch2_3x3_bn_out = self.inception_5b_branch2_3x3_bn(inception_5b_branch2_3x3_out)\n inception_5b_branch2_relu_3x3_out = self.inception_5b_branch2_relu_3x3(inception_5b_branch2_3x3_bn_out)\n inception_5b_pool_out = self.inception_5b_pool(inception_5a_output_out)\n inception_5b_pool_proj_out = self.inception_5b_pool_proj(inception_5b_pool_out)\n inception_5b_pool_proj_bn_out = self.inception_5b_pool_proj_bn(inception_5b_pool_proj_out)\n inception_5b_relu_pool_proj_out = self.inception_5b_relu_pool_proj(inception_5b_pool_proj_bn_out)\n inception_5b_output_out = torch.cat([inception_5b_1x1_bn_out,inception_5b_branch1_3x3_bn_out,inception_5b_branch2_3x3_bn_out,inception_5b_pool_proj_bn_out], 1)\n final_out = self.avgpool(inception_5b_output_out)\n final_out = self.cls_head(final_out)\n \n return final_out\n\nif __name__ == '__main__':\n import torch\n logging.getLogger().setLevel(logging.DEBUG)\n # ---------\n net = InceptionV1_I3D(pretrained=True)\n data = torch.randn(1,3,16,224,224)\n output = net(data)\n print(output.shape)" ]
[ [ "torch.cat", "torch.load", "torch.randn", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.ReLU", "torch.nn.BatchNorm3d", "torch.nn.functional.pad" ] ]
Armandpl/stable-baselines3
[ "59bec3018058300f8892cc12593fcd1bd164ef48" ]
[ "stable_baselines3/common/distributions.py" ]
[ "\"\"\"Probability distributions.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport gym\nimport torch as th\nfrom gym import spaces\nfrom torch import nn\nfrom torch.distributions import Bernoulli, Categorical, Normal\n\nfrom stable_baselines3.common.preprocessing import get_action_dim\n\n\nclass Distribution(ABC):\n \"\"\"Abstract base class for distributions.\"\"\"\n\n def __init__(self):\n super(Distribution, self).__init__()\n self.distribution = None\n\n @abstractmethod\n def proba_distribution_net(self, *args, **kwargs) -> Union[nn.Module, Tuple[nn.Module, nn.Parameter]]:\n \"\"\"Create the layers and parameters that represent the distribution.\n\n Subclasses must define this, but the arguments and return type vary between\n concrete classes.\"\"\"\n\n @abstractmethod\n def proba_distribution(self, *args, **kwargs) -> \"Distribution\":\n \"\"\"Set parameters of the distribution.\n\n :return: self\n \"\"\"\n\n @abstractmethod\n def log_prob(self, x: th.Tensor) -> th.Tensor:\n \"\"\"\n Returns the log likelihood\n\n :param x: the taken action\n :return: The log likelihood of the distribution\n \"\"\"\n\n @abstractmethod\n def entropy(self) -> Optional[th.Tensor]:\n \"\"\"\n Returns Shannon's entropy of the probability\n\n :return: the entropy, or None if no analytical form is known\n \"\"\"\n\n @abstractmethod\n def sample(self) -> th.Tensor:\n \"\"\"\n Returns a sample from the probability distribution\n\n :return: the stochastic action\n \"\"\"\n\n @abstractmethod\n def mode(self) -> th.Tensor:\n \"\"\"\n Returns the most likely action (deterministic output)\n from the probability distribution\n\n :return: the stochastic action\n \"\"\"\n\n def get_actions(self, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Return actions according to the probability distribution.\n\n :param deterministic:\n :return:\n \"\"\"\n if deterministic:\n return self.mode()\n return self.sample()\n\n @abstractmethod\n def actions_from_params(self, *args, **kwargs) -> th.Tensor:\n \"\"\"\n Returns samples from the probability distribution\n given its parameters.\n\n :return: actions\n \"\"\"\n\n @abstractmethod\n def log_prob_from_params(self, *args, **kwargs) -> Tuple[th.Tensor, th.Tensor]:\n \"\"\"\n Returns samples and the associated log probabilities\n from the probability distribution given its parameters.\n\n :return: actions and log prob\n \"\"\"\n\n\ndef sum_independent_dims(tensor: th.Tensor) -> th.Tensor:\n \"\"\"\n Continuous actions are usually considered to be independent,\n so we can sum components of the ``log_prob`` or the entropy.\n\n :param tensor: shape: (n_batch, n_actions) or (n_batch,)\n :return: shape: (n_batch,)\n \"\"\"\n if len(tensor.shape) > 1:\n tensor = tensor.sum(dim=1)\n else:\n tensor = tensor.sum()\n return tensor\n\n\nclass DiagGaussianDistribution(Distribution):\n \"\"\"\n Gaussian distribution with diagonal covariance matrix, for continuous actions.\n\n :param action_dim: Dimension of the action space.\n \"\"\"\n\n def __init__(self, action_dim: int):\n super(DiagGaussianDistribution, self).__init__()\n self.action_dim = action_dim\n self.mean_actions = None\n self.log_std = None\n\n def proba_distribution_net(self, latent_dim: int, log_std_init: float = 0.0) -> Tuple[nn.Module, nn.Parameter]:\n \"\"\"\n Create the layers and parameter that represent the distribution:\n one output will be the mean of the Gaussian, the other parameter will be the\n standard deviation (log std in fact to allow 
negative values)\n\n :param latent_dim: Dimension of the last layer of the policy (before the action layer)\n :param log_std_init: Initial value for the log standard deviation\n :return:\n \"\"\"\n mean_actions = nn.Linear(latent_dim, self.action_dim)\n # TODO: allow action dependent std\n log_std = nn.Parameter(th.ones(self.action_dim) * log_std_init, requires_grad=True)\n return mean_actions, log_std\n\n def proba_distribution(self, mean_actions: th.Tensor, log_std: th.Tensor) -> \"DiagGaussianDistribution\":\n \"\"\"\n Create the distribution given its parameters (mean, std)\n\n :param mean_actions:\n :param log_std:\n :return:\n \"\"\"\n action_std = th.ones_like(mean_actions) * log_std.exp()\n self.distribution = Normal(mean_actions, action_std)\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the log probabilities of actions according to the distribution.\n Note that you must first call the ``proba_distribution()`` method.\n\n :param actions:\n :return:\n \"\"\"\n log_prob = self.distribution.log_prob(actions)\n return sum_independent_dims(log_prob)\n\n def entropy(self) -> th.Tensor:\n return sum_independent_dims(self.distribution.entropy())\n\n def sample(self) -> th.Tensor:\n # Reparametrization trick to pass gradients\n return self.distribution.rsample()\n\n def mode(self) -> th.Tensor:\n return self.distribution.mean\n\n def actions_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(mean_actions, log_std)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n \"\"\"\n Compute the log probability of taking an action\n given the distribution parameters.\n\n :param mean_actions:\n :param log_std:\n :return:\n \"\"\"\n actions = self.actions_from_params(mean_actions, log_std)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n\nclass SquashedDiagGaussianDistribution(DiagGaussianDistribution):\n \"\"\"\n Gaussian distribution with diagonal covariance matrix, followed by a squashing function (tanh) to ensure bounds.\n\n :param action_dim: Dimension of the action space.\n :param epsilon: small value to avoid NaN due to numerical imprecision.\n \"\"\"\n\n def __init__(self, action_dim: int, epsilon: float = 1e-6):\n super(SquashedDiagGaussianDistribution, self).__init__(action_dim)\n # Avoid NaN (prevents division by zero or log of zero)\n self.epsilon = epsilon\n self.gaussian_actions = None\n\n def proba_distribution(self, mean_actions: th.Tensor, log_std: th.Tensor) -> \"SquashedDiagGaussianDistribution\":\n super(SquashedDiagGaussianDistribution, self).proba_distribution(mean_actions, log_std)\n return self\n\n def log_prob(self, actions: th.Tensor, gaussian_actions: Optional[th.Tensor] = None) -> th.Tensor:\n # Inverse tanh\n # Naive implementation (not stable): 0.5 * torch.log((1 + x) / (1 - x))\n # We use numpy to avoid numerical instability\n if gaussian_actions is None:\n # It will be clipped to avoid NaN when inversing tanh\n gaussian_actions = TanhBijector.inverse(actions)\n\n # Log likelihood for a Gaussian distribution\n log_prob = super(SquashedDiagGaussianDistribution, self).log_prob(gaussian_actions)\n # Squash correction (from original SAC implementation)\n # this comes from the fact that tanh is bijective and differentiable\n log_prob -= th.sum(th.log(1 - actions**2 + self.epsilon), dim=1)\n 
return log_prob\n\n def entropy(self) -> Optional[th.Tensor]:\n # No analytical form,\n # entropy needs to be estimated using -log_prob.mean()\n return None\n\n def sample(self) -> th.Tensor:\n # Reparametrization trick to pass gradients\n self.gaussian_actions = super().sample()\n return th.tanh(self.gaussian_actions)\n\n def mode(self) -> th.Tensor:\n self.gaussian_actions = super().mode()\n # Squash the output\n return th.tanh(self.gaussian_actions)\n\n def log_prob_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n action = self.actions_from_params(mean_actions, log_std)\n log_prob = self.log_prob(action, self.gaussian_actions)\n return action, log_prob\n\n\nclass CategoricalDistribution(Distribution):\n \"\"\"\n Categorical distribution for discrete actions.\n\n :param action_dim: Number of discrete actions\n \"\"\"\n\n def __init__(self, action_dim: int):\n super(CategoricalDistribution, self).__init__()\n self.action_dim = action_dim\n\n def proba_distribution_net(self, latent_dim: int) -> nn.Module:\n \"\"\"\n Create the layer that represents the distribution:\n it will be the logits of the Categorical distribution.\n You can then get probabilities using a softmax.\n\n :param latent_dim: Dimension of the last layer\n of the policy network (before the action layer)\n :return:\n \"\"\"\n action_logits = nn.Linear(latent_dim, self.action_dim)\n return action_logits\n\n def proba_distribution(self, action_logits: th.Tensor) -> \"CategoricalDistribution\":\n self.distribution = Categorical(logits=action_logits)\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n return self.distribution.log_prob(actions)\n\n def entropy(self) -> th.Tensor:\n return self.distribution.entropy()\n\n def sample(self) -> th.Tensor:\n return self.distribution.sample()\n\n def mode(self) -> th.Tensor:\n return th.argmax(self.distribution.probs, dim=1)\n\n def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(action_logits)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n actions = self.actions_from_params(action_logits)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n\nclass MultiCategoricalDistribution(Distribution):\n \"\"\"\n MultiCategorical distribution for multi discrete actions.\n\n :param action_dims: List of sizes of discrete action spaces\n \"\"\"\n\n def __init__(self, action_dims: List[int]):\n super(MultiCategoricalDistribution, self).__init__()\n self.action_dims = action_dims\n\n def proba_distribution_net(self, latent_dim: int) -> nn.Module:\n \"\"\"\n Create the layer that represents the distribution:\n it will be the logits (flattened) of the MultiCategorical distribution.\n You can then get probabilities using a softmax on each sub-space.\n\n :param latent_dim: Dimension of the last layer\n of the policy network (before the action layer)\n :return:\n \"\"\"\n\n action_logits = nn.Linear(latent_dim, sum(self.action_dims))\n return action_logits\n\n def proba_distribution(self, action_logits: th.Tensor) -> \"MultiCategoricalDistribution\":\n self.distribution = [Categorical(logits=split) for split in th.split(action_logits, tuple(self.action_dims), dim=1)]\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n # Extract each discrete action and compute log prob for their respective 
distributions\n return th.stack(\n [dist.log_prob(action) for dist, action in zip(self.distribution, th.unbind(actions, dim=1))], dim=1\n ).sum(dim=1)\n\n def entropy(self) -> th.Tensor:\n return th.stack([dist.entropy() for dist in self.distribution], dim=1).sum(dim=1)\n\n def sample(self) -> th.Tensor:\n return th.stack([dist.sample() for dist in self.distribution], dim=1)\n\n def mode(self) -> th.Tensor:\n return th.stack([th.argmax(dist.probs, dim=1) for dist in self.distribution], dim=1)\n\n def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(action_logits)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n actions = self.actions_from_params(action_logits)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n\nclass BernoulliDistribution(Distribution):\n \"\"\"\n Bernoulli distribution for MultiBinary action spaces.\n\n :param action_dim: Number of binary actions\n \"\"\"\n\n def __init__(self, action_dims: int):\n super(BernoulliDistribution, self).__init__()\n self.action_dims = action_dims\n\n def proba_distribution_net(self, latent_dim: int) -> nn.Module:\n \"\"\"\n Create the layer that represents the distribution:\n it will be the logits of the Bernoulli distribution.\n\n :param latent_dim: Dimension of the last layer\n of the policy network (before the action layer)\n :return:\n \"\"\"\n action_logits = nn.Linear(latent_dim, self.action_dims)\n return action_logits\n\n def proba_distribution(self, action_logits: th.Tensor) -> \"BernoulliDistribution\":\n self.distribution = Bernoulli(logits=action_logits)\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n return self.distribution.log_prob(actions).sum(dim=1)\n\n def entropy(self) -> th.Tensor:\n return self.distribution.entropy().sum(dim=1)\n\n def sample(self) -> th.Tensor:\n return self.distribution.sample()\n\n def mode(self) -> th.Tensor:\n return th.round(self.distribution.probs)\n\n def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(action_logits)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n actions = self.actions_from_params(action_logits)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n\nclass StateDependentNoiseDistribution(Distribution):\n \"\"\"\n Distribution class for using generalized State Dependent Exploration (gSDE).\n Paper: https://arxiv.org/abs/2005.05719\n\n It is used to create the noise exploration matrix and\n compute the log probability of an action with that noise.\n\n :param action_dim: Dimension of the action space.\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,)\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this ensures bounds are satisfied.\n :param learn_features: Whether to learn features for gSDE or not.\n This will enable gradients to be backpropagated through the features\n ``latent_sde`` in the code.\n :param epsilon: small value to avoid NaN due to numerical imprecision.\n \"\"\"\n\n def __init__(\n self,\n action_dim: int,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n learn_features: bool = False,\n epsilon: float = 1e-6,\n ):\n super(StateDependentNoiseDistribution, self).__init__()\n self.action_dim = action_dim\n self.latent_sde_dim = None\n self.mean_actions = None\n self.log_std = None\n self.weights_dist = None\n self.exploration_mat = None\n self.exploration_matrices = None\n self._latent_sde = None\n self.use_expln = use_expln\n self.full_std = full_std\n self.epsilon = epsilon\n self.learn_features = learn_features\n if squash_output:\n self.bijector = TanhBijector(epsilon)\n else:\n self.bijector = None\n\n def get_std(self, log_std: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the standard deviation from the learned parameter\n (log of it by default). This ensures that the std is positive.\n\n :param log_std:\n :return:\n \"\"\"\n if self.use_expln:\n # From gSDE paper, it allows to keep variance\n # above zero and prevent it from growing too fast\n below_threshold = th.exp(log_std) * (log_std <= 0)\n # Avoid NaN: zeros values that are below zero\n safe_log_std = log_std * (log_std > 0) + self.epsilon\n above_threshold = (th.log1p(safe_log_std) + 1.0) * (log_std > 0)\n std = below_threshold + above_threshold\n else:\n # Use normal exponential\n std = th.exp(log_std)\n\n if self.full_std:\n return std\n # Reduce the number of parameters:\n return th.ones(self.latent_sde_dim, self.action_dim).to(log_std.device) * std\n\n def sample_weights(self, log_std: th.Tensor, batch_size: int = 1) -> None:\n \"\"\"\n Sample weights for the noise exploration matrix,\n using a centered Gaussian distribution.\n\n :param log_std:\n :param batch_size:\n \"\"\"\n std = self.get_std(log_std)\n self.weights_dist = Normal(th.zeros_like(std), std)\n # Reparametrization trick to pass gradients\n self.exploration_mat = self.weights_dist.rsample()\n # Pre-compute matrices in case of parallel exploration\n self.exploration_matrices = self.weights_dist.rsample((batch_size,))\n\n def proba_distribution_net(\n self, latent_dim: int, log_std_init: float = -2.0, latent_sde_dim: Optional[int] = None\n ) -> Tuple[nn.Module, nn.Parameter]:\n \"\"\"\n Create the layers and parameter that represent the distribution:\n one output will be the deterministic action, the other parameter will be the\n standard deviation of the distribution that control the weights of the noise matrix.\n\n :param latent_dim: Dimension of the last layer of the policy (before the action layer)\n :param log_std_init: Initial value for the log standard deviation\n :param latent_sde_dim: Dimension of the last layer of the features extractor\n for gSDE. 
By default, it is shared with the policy network.\n :return:\n \"\"\"\n # Network for the deterministic action, it represents the mean of the distribution\n mean_actions_net = nn.Linear(latent_dim, self.action_dim)\n # When we learn features for the noise, the feature dimension\n # can be different between the policy and the noise network\n self.latent_sde_dim = latent_dim if latent_sde_dim is None else latent_sde_dim\n # Reduce the number of parameters if needed\n log_std = th.ones(self.latent_sde_dim, self.action_dim) if self.full_std else th.ones(self.latent_sde_dim, 1)\n # Transform it to a parameter so it can be optimized\n log_std = nn.Parameter(log_std * log_std_init, requires_grad=True)\n # Sample an exploration matrix\n self.sample_weights(log_std)\n return mean_actions_net, log_std\n\n def proba_distribution(\n self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor\n ) -> \"StateDependentNoiseDistribution\":\n \"\"\"\n Create the distribution given its parameters (mean, std)\n\n :param mean_actions:\n :param log_std:\n :param latent_sde:\n :return:\n \"\"\"\n # Stop gradient if we don't want to influence the features\n self._latent_sde = latent_sde if self.learn_features else latent_sde.detach()\n variance = th.mm(self._latent_sde**2, self.get_std(log_std) ** 2)\n self.distribution = Normal(mean_actions, th.sqrt(variance + self.epsilon))\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n if self.bijector is not None:\n gaussian_actions = self.bijector.inverse(actions)\n else:\n gaussian_actions = actions\n # log likelihood for a gaussian\n log_prob = self.distribution.log_prob(gaussian_actions)\n # Sum along action dim\n log_prob = sum_independent_dims(log_prob)\n\n if self.bijector is not None:\n # Squash correction (from original SAC implementation)\n log_prob -= th.sum(self.bijector.log_prob_correction(gaussian_actions), dim=1)\n return log_prob\n\n def entropy(self) -> Optional[th.Tensor]:\n if self.bijector is not None:\n # No analytical form,\n # entropy needs to be estimated using -log_prob.mean()\n return None\n return sum_independent_dims(self.distribution.entropy())\n\n def sample(self) -> th.Tensor:\n noise = self.get_noise(self._latent_sde)\n actions = self.distribution.mean + noise\n if self.bijector is not None:\n return self.bijector.forward(actions)\n return actions\n\n def mode(self) -> th.Tensor:\n actions = self.distribution.mean\n if self.bijector is not None:\n return self.bijector.forward(actions)\n return actions\n\n def get_noise(self, latent_sde: th.Tensor) -> th.Tensor:\n latent_sde = latent_sde if self.learn_features else latent_sde.detach()\n # Default case: only one exploration matrix\n if len(latent_sde) == 1 or len(latent_sde) != len(self.exploration_matrices):\n return th.mm(latent_sde, self.exploration_mat)\n # Use batch matrix multiplication for efficient computation\n # (batch_size, n_features) -> (batch_size, 1, n_features)\n latent_sde = latent_sde.unsqueeze(1)\n # (batch_size, 1, n_actions)\n noise = th.bmm(latent_sde, self.exploration_matrices)\n return noise.squeeze(1)\n\n def actions_from_params(\n self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor, deterministic: bool = False\n ) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(mean_actions, log_std, latent_sde)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(\n self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor\n ) -> Tuple[th.Tensor, 
th.Tensor]:\n actions = self.actions_from_params(mean_actions, log_std, latent_sde)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n\nclass TanhBijector(object):\n \"\"\"\n Bijective transformation of a probability distribution\n using a squashing function (tanh)\n TODO: use Pyro instead (https://pyro.ai/)\n\n :param epsilon: small value to avoid NaN due to numerical imprecision.\n \"\"\"\n\n def __init__(self, epsilon: float = 1e-6):\n super(TanhBijector, self).__init__()\n self.epsilon = epsilon\n\n @staticmethod\n def forward(x: th.Tensor) -> th.Tensor:\n return th.tanh(x)\n\n @staticmethod\n def atanh(x: th.Tensor) -> th.Tensor:\n \"\"\"\n Inverse of Tanh\n\n Taken from Pyro: https://github.com/pyro-ppl/pyro\n 0.5 * torch.log((1 + x ) / (1 - x))\n \"\"\"\n return 0.5 * (x.log1p() - (-x).log1p())\n\n @staticmethod\n def inverse(y: th.Tensor) -> th.Tensor:\n \"\"\"\n Inverse tanh.\n\n :param y:\n :return:\n \"\"\"\n eps = th.finfo(y.dtype).eps\n # Clip the action to avoid NaN\n return TanhBijector.atanh(y.clamp(min=-1.0 + eps, max=1.0 - eps))\n\n def log_prob_correction(self, x: th.Tensor) -> th.Tensor:\n # Squash correction (from original SAC implementation)\n return th.log(1.0 - th.tanh(x) ** 2 + self.epsilon)\n\n\ndef make_proba_distribution(\n action_space: gym.spaces.Space, use_sde: bool = False, dist_kwargs: Optional[Dict[str, Any]] = None\n) -> Distribution:\n \"\"\"\n Return an instance of Distribution for the correct type of action space\n\n :param action_space: the input action space\n :param use_sde: Force the use of StateDependentNoiseDistribution\n instead of DiagGaussianDistribution\n :param dist_kwargs: Keyword arguments to pass to the probability distribution\n :return: the appropriate Distribution object\n \"\"\"\n if dist_kwargs is None:\n dist_kwargs = {}\n\n if isinstance(action_space, spaces.Box):\n assert len(action_space.shape) == 1, \"Error: the action space must be a vector\"\n cls = StateDependentNoiseDistribution if use_sde else DiagGaussianDistribution\n return cls(get_action_dim(action_space), **dist_kwargs)\n elif isinstance(action_space, spaces.Discrete):\n return CategoricalDistribution(action_space.n, **dist_kwargs)\n elif isinstance(action_space, spaces.MultiDiscrete):\n return MultiCategoricalDistribution(action_space.nvec, **dist_kwargs)\n elif isinstance(action_space, spaces.MultiBinary):\n return BernoulliDistribution(action_space.n, **dist_kwargs)\n else:\n raise NotImplementedError(\n \"Error: probability distribution, not implemented for action space\"\n f\"of type {type(action_space)}.\"\n \" Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary.\"\n )\n\n\ndef kl_divergence(dist_true: Distribution, dist_pred: Distribution) -> th.Tensor:\n \"\"\"\n Wrapper for the PyTorch implementation of the full form KL Divergence\n\n :param dist_true: the p distribution\n :param dist_pred: the q distribution\n :return: KL(dist_true||dist_pred)\n \"\"\"\n # KL Divergence for different distribution types is out of scope\n assert dist_true.__class__ == dist_pred.__class__, \"Error: input distributions should be the same type\"\n\n # MultiCategoricalDistribution is not a PyTorch Distribution subclass\n # so we need to implement it ourselves!\n if isinstance(dist_pred, MultiCategoricalDistribution):\n assert dist_pred.action_dims == dist_true.action_dims, \"Error: distributions must have the same input space\"\n return th.stack(\n [th.distributions.kl_divergence(p, q) for p, q in zip(dist_true.distribution, 
dist_pred.distribution)],\n dim=1,\n ).sum(dim=1)\n\n # Use the PyTorch kl_divergence implementation\n else:\n return th.distributions.kl_divergence(dist_true.distribution, dist_pred.distribution)\n" ]
[ [ "torch.tanh", "torch.finfo", "torch.mm", "torch.ones", "torch.sqrt", "torch.distributions.Bernoulli", "torch.round", "torch.bmm", "torch.distributions.kl_divergence", "torch.ones_like", "torch.nn.Parameter", "torch.zeros_like", "torch.exp", "torch.nn.Linear", "torch.log", "torch.log1p", "torch.distributions.Normal", "torch.distributions.Categorical", "torch.unbind", "torch.argmax" ] ]
ksh981214/icml18-jtnn
[ "701c363e24e38a47fc0c4509565fb021b5ba91c6" ]
[ "molopt/optimize.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport math, random, sys\nfrom optparse import OptionParser\nfrom collections import deque\n\nimport rdkit\nimport rdkit.Chem as Chem\nfrom rdkit.Chem import Descriptors\nimport sascorer\n\nfrom jtnn import *\n\nlg = rdkit.RDLogger.logger() \nlg.setLevel(rdkit.RDLogger.CRITICAL)\n\nparser = OptionParser()\nparser.add_option(\"-t\", \"--test\", dest=\"test_path\")\nparser.add_option(\"-v\", \"--vocab\", dest=\"vocab_path\")\nparser.add_option(\"-m\", \"--model\", dest=\"model_path\")\nparser.add_option(\"-w\", \"--hidden\", dest=\"hidden_size\", default=200)\nparser.add_option(\"-l\", \"--latent\", dest=\"latent_size\", default=56)\nparser.add_option(\"-d\", \"--depth\", dest=\"depth\", default=3)\nparser.add_option(\"-s\", \"--sim\", dest=\"cutoff\", default=0.0)\nopts,args = parser.parse_args()\n \nvocab = [x.strip(\"\\r\\n \") for x in open(opts.vocab_path)] \nvocab = Vocab(vocab)\n\nhidden_size = int(opts.hidden_size)\nlatent_size = int(opts.latent_size)\ndepth = int(opts.depth)\nsim_cutoff = float(opts.cutoff)\n\nmodel = JTPropVAE(vocab, hidden_size, latent_size, depth)\nmodel.load_state_dict(torch.load(opts.model_path))\nmodel = model.cuda()\n\ndata = []\nwith open(opts.test_path) as f:\n for line in f:\n s = line.strip(\"\\r\\n \").split()[0]\n data.append(s)\n\nres = []\nprint(\"Found New SMILES: {}\".format(len(data)))\nfor i,smiles in enumerate(data):\n mol = Chem.MolFromSmiles(smiles)\n score = Descriptors.MolLogP(mol) - sascorer.calculateScore(mol)\n\n new_smiles,sim = model.optimize(smiles, sim_cutoff=sim_cutoff, lr=2, num_iter=80)\n new_mol = Chem.MolFromSmiles(new_smiles)\n new_score = Descriptors.MolLogP(new_mol) - sascorer.calculateScore(new_mol)\n\n res.append( (new_score - score, sim, score, new_score, smiles, new_smiles) )\n print(i)\n print(\"Improvement:{} | Similarity: {}\".format(new_score - score, sim))\n print(\"New y(m): {} | Old y(m): {}\".format(new_score, score))\n print(\"New SMILES: {} | Old: {}\".format(new_smiles, smiles))\n print(\"\")\n\nprint(\"Avg of Improvement :{}\".format(sum([x[0] for x in res])/len(data))) \nprint(\"Avg of Similarity: {}\".format(sum([x[1] for x in res])/len(data)))\nprint(\"Sum of New y(m) :{}\".format(sum([x[3] for x in res])/len(data))) \nprint(\"Sum of Old y(m): {}\".format(sum([x[2] for x in res])/len(data)))\n" ]
[ [ "torch.load" ] ]
dkaramit/ASAP
[ "afade2737b332e7dbf0ea06eb4f31564a478ee40" ]
[ "Artificial_Neural_Networks/python/FeedForwardANN/FFANN_VanillaSGD.py" ]
[ "from numpy import sqrt as np_sqrt\nfrom numpy import abs as np_abs\nfrom .FFANN_SGD import StochasticGradientDescent\n\nclass VanillaSGD(StochasticGradientDescent):\n '''\n Not the best (far from it) strategy, but the simplest. Will use it to test if the implementation works.\n '''\n def __init__(self,loss,alpha=1e-2):\n '''\n loss: the loss function\n alpha: learning rate\n '''\n StochasticGradientDescent.__init__(self,loss)\n self.alpha=alpha\n \n def update(self, data_out,abs_tol=1e-5, rel_tol=1e-3):\n '''\n during the update step, you calculate the gradient of Q\n and update w and b. \n '''\n #The update should run after\n #FFANN.feedForward() and FFANN.backPropagation().\n \n #these will be used to determine if the stopping conditions are satisfied \n _w2=0\n _check=0\n self.Q.randomDataPoint()\n \n for l in range(self.Q.model.total_layers-1):\n for j in range(self.Q.model.nodes[l+1]):\n for i in range(self.Q.model.nodes[l]):\n #get the grad of the loss. The results should be stored in loss.dQdw and loss.dQdb\n #This way it should be easy to update the weights and biases of FFANN\n self.Q.grad(l,j,i)\n dw=self.alpha*self.Q.dQdw\n #update the weight using loss.dQdw\n self.Q.model.addToWeight(l,j,i, -dw)\n\n _w2=abs_tol + np_abs(self.Q.model.weights[l][j][i]) * rel_tol\n _check+=(dw/_w2)*(dw/_w2)\n\n dw=self.alpha*self.Q.dQdb\n #update the bias using loss.dQdb (it is the same for all i, so don't run loss.grad again).\n self.Q.model.addToBias(l,j, -dw)\n \n _w2=abs_tol + np_abs(self.Q.model.biases[l][j]) * rel_tol\n _check+=(dw/_w2)*(dw/_w2)\n \n \n _check=np_sqrt(1./self.Q.N *_check)\n return _check" ]
[ [ "numpy.abs", "numpy.sqrt" ] ]
TatsukichiShibuya/theoretical_framework_for_target_propagation
[ "b6ee350c238be645e9cb42e12c79606f28f20f11" ]
[ "lib/networks.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2019 Alexander Meulemans\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nIn here, we define classes for fully connected multilayer perceptrons that are\ntrained by difference target propagation and its variants\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport warnings\nfrom lib.dtp_layers import DTPLayer\nfrom lib.dtpdrl_layers import DTPDRLLayer\nfrom tensorboardX import SummaryWriter\nimport lib.utils as utils\nfrom lib.utils import NetworkError\nimport pandas as pd\nimport torch.nn.functional as F\n\n# TODO: adjust the compute_output_target methods for computing voltage targets\n# instead of rate targets for the networks that use voltage targets\n\nclass DTPNetwork(nn.Module):\n \"\"\" A multilayer perceptron (MLP) network that will be trained by the\n difference target propagation (DTP) method.\n\n Attributes:\n layers (nn.ModuleList): a ModuleList with the layer objects of the MLP\n depth: the depth of the network (# hidden layers + 1)\n input (torch.Tensor): the input minibatch of the current training\n iteration. We need\n to save this tensor for computing the weight updates for the\n first hidden layer\n sigma: standard deviation of the gaussian that corrupts layer\n activations for computing the reconstruction losses.\n update_idx (None or int): the layer index of which the layer parameters\n are updated for the current mini-batch, when working in a randomized\n setting. 
If the randomized setting is not used, it is equal to None.\n\n Args:\n n_in: input dimension (flattened input assumed)\n n_hidden: list with hidden layer dimensions\n n_out: output dimension\n activation: activation function indicator for the hidden layers\n output_activation: activation function indicator for the output layer\n bias: boolean indicating whether the network uses biases or not\n sigma: standard deviation of the gaussian that corrupts layer\n activations for computing the reconstruction losses.\n forward_requires_grad (bool): Flag indicating whether the forward\n parameters require gradients that can be computed with autograd.\n This might be needed when comparing the DTP updates with BP updates\n and GN updates.\n initialization (str): the initialization method used for the forward\n and feedback matrices of the layers\n\n\n \"\"\"\n\n def __init__(self, n_in, n_hidden, n_out, activation='relu',\n output_activation='linear', bias=True, sigma=0.36,\n forward_requires_grad=False,\n initialization='orthogonal',\n fb_activation='relu',\n plots=None):\n nn.Module.__init__(self)\n\n self._depth = len(n_hidden) + 1\n self._layers = self.set_layers(n_in, n_hidden, n_out, activation,\n output_activation, bias,\n forward_requires_grad,\n initialization,\n fb_activation)\n self._input = None\n self._sigma = sigma\n self._forward_requires_grad = forward_requires_grad\n self._update_idx = None\n self._plots = plots\n if plots is not None:\n self.bp_angles = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.gn_angles = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.gnt_angles = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.bp_activation_angles = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.gn_activation_angles = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n\n self.reconstruction_loss_init = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.reconstruction_loss = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n\n self.td_activation = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.gn_activation = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n self.bp_activation = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n\n self.nullspace_relative_norm = pd.DataFrame(columns=[i for i in range(0, self._depth)])\n\n\n\n def set_layers(self, n_in, n_hidden, n_out, activation, output_activation,\n bias, forward_requires_grad, initialization,\n fb_activation):\n \"\"\"\n Create the layers of the network and output them as a ModuleList.\n Args:\n n_in: input dimension (flattened input assumed)\n n_hidden: list with hidden layer dimensions\n n_out: output dimension\n activation: activation function indicator for the hidden layers\n output_activation: activation function indicator for the output\n layer\n bias: boolean indicating whether the network uses biases or not\n forward_requires_grad (bool): Flag indicating whether the forward\n parameters require gradients that can be computed with autograd.\n This might be needed when comparing the DTP updates with BP updates\n and GN updates.\n initialization (str): the initialization method used for the forward\n and feedback matrices of the layers\n fb_activation (str): activation function indicator for the feedback\n path of the hidden layers\n\n \"\"\"\n n_all = [n_in] + n_hidden + [n_out]\n layers = nn.ModuleList()\n for i in range(1, len(n_all) - 1):\n layers.append(\n DTPLayer(n_all[i - 1], n_all[i], bias=bias,\n 
forward_activation=activation,\n feedback_activation=fb_activation,\n forward_requires_grad=forward_requires_grad,\n initialization=initialization\n ))\n layers.append(DTPLayer(n_all[-2], n_all[-1], bias=bias,\n forward_activation=output_activation,\n feedback_activation=fb_activation,\n forward_requires_grad=forward_requires_grad,\n initialization=initialization))\n return layers\n\n @property\n def depth(self):\n \"\"\"Getter for read-only attribute :attr:`depth`.\"\"\"\n return self._depth\n\n @property\n def layers(self):\n \"\"\"Getter for read-only attribute :attr:`layers`.\"\"\"\n return self._layers\n\n @property\n def sigma(self):\n \"\"\" Getter for read-only attribute sigma\"\"\"\n return self._sigma\n\n @property\n def input(self):\n \"\"\" Getter for attribute input.\"\"\"\n return self._input\n\n @input.setter\n def input(self, value):\n \"\"\" Setter for attribute input.\"\"\"\n self._input = value\n\n @property\n def forward_requires_grad(self):\n \"\"\" Getter for read-only attribute forward_requires_grad\"\"\"\n return self._forward_requires_grad\n\n @property\n def update_idx(self):\n \"\"\" Getter for attribute update_idx\"\"\"\n return self._update_idx\n\n @update_idx.setter\n def update_idx(self, value):\n \"\"\"Setter for attribute update_idx\"\"\"\n self._update_idx = value\n\n def forward(self, x):\n \"\"\" Propagate the input forward through the MLP network.\n\n Args:\n x: the input to the network\n\n returns:\n y: the output of the network\n \"\"\"\n self.input = x\n y = x\n\n for layer in self.layers:\n y = layer.forward(y)\n\n # the output of the network requires a gradient in order to compute the\n # target (in compute_output_target() )\n if y.requires_grad == False:\n y.requires_grad = True\n\n return y\n\n def compute_output_target(self, loss, target_lr):\n \"\"\"\n Compute the output target.\n Args:\n loss (nn.Module): output loss of the network\n target_lr: the learning rate for computing the output target based\n on the gradient of the loss w.r.t. the output layer\n\n Returns: Mini-batch of output targets\n \"\"\"\n output_activations = self.layers[-1].activations\n\n gradient = torch.autograd.grad(loss, output_activations,\n retain_graph=self.forward_requires_grad)\\\n [0].detach()\n output_targets = output_activations - \\\n target_lr*gradient\n return output_targets\n\n def propagate_backward(self, h_target, i):\n \"\"\"\n Propagate the output target backwards to layer i in a DTP-like fashion.\n Args:\n h_target (torch.Tensor): the output target\n i: the layer index to which the target must be propagated\n\n Returns: the target for layer i\n\n \"\"\"\n for k in range(self.depth-1, i, -1):\n h_current = self.layers[k].activations\n h_previous = self.layers[k-1].activations\n h_target = self.layers[k].backward(h_target, h_previous, h_current)\n return h_target\n\n def backward_random(self, loss, target_lr, i, save_target=False,\n norm_ratio=1.):\n \"\"\" Propagate the output target backwards through the network until\n layer i. Based on this target, compute the gradient of the forward\n weights and bias of layer i and save them in the parameter tensors.\n Args:\n loss (nn.Module): output loss of the network\n target_lr: the learning rate for computing the output target based\n on the gradient of the loss w.r.t. 
the output layer\n i: layer index to which the target needs to be propagated and the\n gradients need to be computed\n save_target (bool): flag indicating whether the target should be\n saved in the layer object for later use.\n norm_ratio (float): will only be used in children of DTPLayer for\n the minimal_norm update\n \"\"\"\n\n self.update_idx = i\n\n h_target = self.compute_output_target(loss, target_lr)\n\n h_target = self.propagate_backward(h_target, i)\n\n if save_target:\n self.layers[i].target = h_target\n\n if i == 0: # first hidden layer needs to have the input\n # for computing gradients\n self.layers[i].compute_forward_gradients(h_target, self.input,\n norm_ratio=norm_ratio)\n else:\n self.layers[i].compute_forward_gradients(h_target,\n self.layers[i-1].activations,\n norm_ratio=norm_ratio)\n\n def backward_all(self, output_target, save_target=False, norm_ratio=1.):\n \"\"\" Propagate the output_target backwards through all the layers. Based\n on these local targets, compute the gradient of the forward weights and\n biases of all layers.\n\n Args:\n output_target (torch.Tensor): a mini-batch of targets for the\n output layer.\n save_target (bool): flag indicating whether the target should be\n saved in the layer object for later use.\n norm_ratio (float): will only be used in children of DTPLayer for\n the minimal_norm update\n \"\"\"\n\n h_target = output_target\n\n if save_target:\n self.layers[-1].target = h_target\n for i in range(self.depth-1, 0, -1):\n h_current = self.layers[i].activations\n h_previous = self.layers[i-1].activations\n self.layers[i].compute_forward_gradients(h_target, h_previous,\n norm_ratio=norm_ratio)\n h_target = self.layers[i].backward(h_target, h_previous, h_current)\n if save_target:\n self.layers[i-1].target = h_target\n\n self.layers[0].compute_forward_gradients(h_target, self.input,\n norm_ratio=norm_ratio)\n\n def backward(self, loss, target_lr, save_target=False, norm_ratio=1.):\n \"\"\" Compute and propagate the output_target backwards through all the\n layers. Based on these local targets, compute the gradient of the\n forward weights and biases of all layers.\n\n Args:\n loss (nn.Module): output loss of the network\n target_lr: the learning rate for computing the output target based\n on the gradient of the loss w.r.t. the output layer\n save_target (bool): flag indicating whether the target should be\n saved in the layer object for later use.\n norm_ratio (float): will only be used in children of DTPLayer for\n the minimal_norm update\n \"\"\"\n\n output_target = self.compute_output_target(loss, target_lr)\n self.backward_all(output_target, save_target, norm_ratio=norm_ratio)\n\n def compute_feedback_gradients(self):\n \"\"\" Compute the local reconstruction loss for each layer and compute\n the gradient of those losses with respect to\n the feedback weights and biases. 
The gradients are saved in the\n        feedback parameter tensors.\"\"\"\n\n        for i in range(1, self.depth):\n            h_corrupted = self.layers[i-1].activations + \\\n                    self.sigma * torch.randn_like(self.layers[i-1].activations)\n            self.layers[i].compute_feedback_gradients(h_corrupted, self.sigma)\n\n    def get_forward_parameter_list(self):\n        \"\"\"\n        Returns: a list with all the forward parameters (weights and biases) of\n            the network.\n\n        \"\"\"\n        parameterlist = []\n        for layer in self.layers:\n            parameterlist.append(layer.weights)\n            if layer.bias is not None:\n                parameterlist.append(layer.bias)\n        return parameterlist\n\n    def get_reduced_forward_parameter_list(self):\n        \"\"\"\n        Returns: a list with all the forward parameters of the network, except\n            from the ones of the output layer.\n        \"\"\"\n        parameterlist = []\n        for layer in self.layers[:-1]:\n            parameterlist.append(layer.weights)\n            if layer.bias is not None:\n                parameterlist.append(layer.bias)\n        return parameterlist\n\n    def get_forward_parameters_last_two_layers(self):\n        parameterlist = []\n        for layer in self.layers[-2:]:\n            parameterlist.append(layer.weights)\n            if layer.bias is not None:\n                parameterlist.append(layer.bias)\n        return parameterlist\n\n    def get_forward_parameters_last_three_layers(self):\n        parameterlist = []\n        for layer in self.layers[-3:]:\n            parameterlist.append(layer.weights)\n            if layer.bias is not None:\n                parameterlist.append(layer.bias)\n        return parameterlist\n\n    def get_forward_parameters_last_four_layers(self):\n        parameterlist = []\n        for layer in self.layers[-4:]:\n            parameterlist.append(layer.weights)\n            if layer.bias is not None:\n                parameterlist.append(layer.bias)\n        return parameterlist\n\n    def get_forward_parameter_list_first_layer(self):\n        \"\"\"\n        Returns: a list with only the forward parameters of the first layer.\n        \"\"\"\n        parameterlist = []\n        parameterlist.append(self.layers[0].weights)\n        if self.layers[0].bias is not None:\n            parameterlist.append(self.layers[0].bias)\n        return parameterlist\n\n    def get_feedback_parameter_list(self):\n        \"\"\"\n\n        Returns (list): a list with all the feedback parameters (weights and\n            biases) of the network. Note that the first hidden layer does not\n            need feedback parameters, so they are not put in the list.\n\n        \"\"\"\n        parameterlist = []\n        for layer in self.layers[1:]:\n            parameterlist.append(layer.feedbackweights)\n            if layer.feedbackbias is not None:\n                parameterlist.append(layer.feedbackbias)\n        return parameterlist\n\n    def get_BP_updates(self, loss, i):\n        \"\"\"\n        Compute the gradients of the loss with respect to the forward\n        parameters of layer i.\n        Args:\n            loss (torch.Tensor): the loss value of the current minibatch.\n            i (int): layer index\n\n        Returns (tuple): a tuple with the gradients of the loss with respect to\n            the forward parameters of layer i, computed with backprop.\n\n        \"\"\"\n        return self.layers[i].compute_bp_update(loss)\n\n    def compute_bp_angles(self, loss, i, retain_graph=False):\n        \"\"\"\n        Compute the angles of the current forward parameter updates of layer i\n        with the backprop update for those parameters.\n        Args:\n            loss (torch.Tensor): the loss value of the current minibatch.\n            i (int): layer index\n            retain_graph (bool): flag indicating whether the graph of the\n                network should be retained after computing the gradients or\n                jacobians. 
If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n\n Returns (tuple): Tuple containing the angle in degrees between the\n updates for the forward weights at index 0 and the forward bias\n at index 1 (if bias is not None).\n\n \"\"\"\n bp_gradients = self.layers[i].compute_bp_update(loss,\n retain_graph)\n gradients = self.layers[i].get_forward_gradients()\n if utils.contains_nan(bp_gradients[0].detach()):\n print('bp update contains nan (layer {}):'.format(i))\n print(bp_gradients[0].detach())\n if utils.contains_nan(gradients[0].detach()):\n print('weight update contains nan (layer {}):'.format(i))\n print(gradients[0].detach())\n if torch.norm(gradients[0].detach(), p='fro') < 1e-14:\n print('norm updates approximately zero (layer {}):'.format(i))\n print(torch.norm(gradients[0].detach(), p='fro'))\n print(gradients[0].detach())\n if torch.norm(gradients[0].detach(), p='fro') == 0:\n print('norm updates exactly zero (layer {}):'.format(i))\n print(torch.norm(gradients[0].detach(), p='fro'))\n print(gradients[0].detach())\n\n weights_angle = utils.compute_angle(bp_gradients[0].detach(),\n gradients[0])\n if self.layers[i].bias is not None:\n bias_angle = utils.compute_angle(bp_gradients[1].detach(),\n gradients[1])\n return (weights_angle, bias_angle)\n else:\n return (weights_angle, )\n\n def compute_gn_angles(self, output_activation, loss, damping, i,\n retain_graph=False):\n \"\"\"\n Compute the angles of the current forward parameter updates of layer i\n with the GN update for those parameters.\n Args:\n see lib.dtp_layers.DTPLayer.compute_gn_updates(...)\n i (int): layer index\n retain_graph (bool): flag indicating whether the graph of the\n network should be retained after computing the gradients or\n jacobians. If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n\n Returns (tuple): Tuple containing the angle in degrees between the\n updates for the forward weights at index 0 and the forward bias\n at index 1 (if bias is not None).\n\n \"\"\"\n gn_gradients = self.layers[i].compute_gn_update(output_activation,\n loss,\n damping,\n retain_graph)\n gradients =self.layers[i].get_forward_gradients()\n weights_angle = utils.compute_angle(gn_gradients[0],\n gradients[0])\n if self.layers[i].bias is not None:\n bias_angle = utils.compute_angle(gn_gradients[1],\n gradients[1])\n return (weights_angle, bias_angle)\n else:\n return (weights_angle,)\n\n def compute_gn_activation_angle(self, output_activation, loss,\n damping, i, step,\n retain_graph=False,\n linear=False):\n \"\"\"\n Compute the angle between the difference between the target and layer\n activation and the gauss-newton update for the layers activation\n Args:\n see lib.dtp_layers.DTPLayer.compute_gn_activation_updates(...)\n i (int): layer index\n step (int): epoch step, just for logging\n retain_graph (bool): flag indicating whether the graph of the\n network should be retained after computing the gradients or\n jacobians. 
If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n linear (bool): Flag indicating whether the GN update for the\n linear activations should be computed instead of for the\n nonlinear activations.\n\n Returns: The average angle in degrees\n\n \"\"\"\n if linear:\n target_difference = self.layers[i].linearactivations - \\\n self.layers[i].target\n else:\n target_difference = self.layers[i].activations - \\\n self.layers[i].target\n gn_updates = self.layers[i].compute_gn_activation_updates(\n output_activation,\n loss,\n damping,\n retain_graph=retain_graph,\n linear=linear\n )\n # print(f\"Layer {i}:\")\n # print(torch.mean(target_difference).item())\n # print(torch.mean(gn_updates).item())\n if self._plots is not None:\n self.td_activation.at[step, i] = torch.mean(target_difference).item()\n self.gn_activation.at[step, i] = torch.mean(gn_updates).item()\n\n # exit()\n gn_activationav = utils.compute_average_batch_angle(target_difference, gn_updates)\n return gn_activationav\n\n def compute_bp_activation_angle(self, loss, i, retain_graph=False,\n linear=False):\n \"\"\"\n Compute the angle between the difference between the target and layer\n activation and the backpropagation update for the layers activation\n Args:\n loss (torch.Tensor): the loss value of the current minibatch.\n i (int): layer index\n retain_graph (bool): flag indicating whether the graph of the\n network should be retained after computing the gradients or\n jacobians. If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n linear (bool): Flag indicating whether the GN update for the\n linear activations should be computed instead of for the\n nonlinear activations.\n\n Returns : The average angle in degrees\n \"\"\"\n if linear:\n target_difference = self.layers[i].linearactivations - \\\n self.layers[i].target\n else:\n target_difference = self.layers[i].activations - \\\n self.layers[i].target\n bp_updates = self.layers[i].compute_bp_activation_updates(\n loss=loss,\n retain_graph=retain_graph,\n linear=linear\n ).detach()\n\n angle = utils.compute_average_batch_angle(target_difference.detach(),\n bp_updates)\n\n return angle\n\n def compute_gnt_angle(self, output_activation, loss, damping,\n i, step, retain_graph=False, linear=False):\n if i == 0:\n h_previous = self.input\n else:\n h_previous = self.layers[i-1].activations\n\n gnt_updates = self.layers[i].compute_gnt_updates(\n output_activation=output_activation,\n loss=loss,\n h_previous=h_previous,\n damping=damping,\n retain_graph=retain_graph,\n linear=linear\n )\n\n gradients = self.layers[i].get_forward_gradients()\n weights_angle = utils.compute_angle(gnt_updates[0], gradients[0])\n if self.layers[i].bias is not None:\n bias_angle = utils.compute_angle(gnt_updates[1], gradients[1])\n return (weights_angle, bias_angle)\n else:\n return (weights_angle, )\n\n\n def save_logs(self, writer, step):\n \"\"\" Save logs and plots for tensorboardX.\n\n Args:\n writer (SummaryWriter): summary writer from tensorboardX\n step (int): the global step used for the x-axis of the plots\n \"\"\"\n\n for i in range(len(self.layers)):\n name = 'layer {}'.format(i+1)\n self.layers[i].save_logs(writer, step, name,\n no_gradient=i==0)\n\n def save_feedback_batch_logs(self, writer, step, init=False):\n \"\"\"\n Save the logs for the current minibatch on tensorboardX.\n Args:\n writer (SummaryWriter): summary writer from tensorboardX\n step (int): the global step used for 
the x-axis of the plots\n            init (bool): flag indicating that the training is in the\n                initialization phase (only training the feedback weights).\n        \"\"\"\n        for i in range(len(self.layers)):\n            name = 'layer {}'.format(i+1)\n            self.layers[i].save_feedback_batch_logs(writer, step, name,\n                                     no_gradient=i == 0, init=init)\n\n    def save_bp_angles(self, writer, step, loss, retain_graph=False):\n        \"\"\"\n        Save the angles of the current forward parameter updates\n        with the backprop update for those parameters on tensorboardX.\n        Args:\n            writer (SummaryWriter): summary writer from tensorboardX\n            step (int): the global step used for the x-axis of the plots\n            loss (torch.Tensor): the loss value of the current minibatch.\n            retain_graph (bool): flag indicating whether the graph of the\n                network should be retained after computing the gradients or\n                jacobians. If the graph will not be used anymore for the current\n                minibatch afterwards, retain_graph should be False.\n\n        \"\"\"\n        # if self.update_idx is None, the randomized setting is not used and\n        # all the layers have their parameters updated. The angle should thus\n        # be computed for all layers\n        if self.update_idx is None:\n            layer_indices = range(len(self.layers))\n        else:\n            layer_indices = [self.update_idx]\n\n        for i in layer_indices:\n            name = 'layer {}'.format(i+1)\n            if i != layer_indices[-1]: # if it is not the last index, the graph\n                # should be saved for the next index\n                retain_graph_flag = True\n            else:\n                retain_graph_flag = retain_graph\n            angles = self.compute_bp_angles(loss, i, retain_graph_flag)\n            writer.add_scalar(\n                tag='{}/weight_bp_angle'.format(name),\n                scalar_value=angles[0],\n                global_step=step\n            )\n\n            if self._plots is not None:\n                self.bp_angles.at[step, i] = angles[0].item()\n\n\n            if self.layers[i].bias is not None:\n                writer.add_scalar(\n                    tag='{}/bias_bp_angle'.format(name),\n                    scalar_value=angles[1],\n                    global_step=step\n                )\n\n    def save_gn_angles(self, writer, step, output_activation, loss, damping,\n                       retain_graph=False):\n        \"\"\"\n        Save the angles of the current forward parameter updates\n        with the Gauss-Newton update for those parameters on tensorboardX.\n        Args:\n            writer (SummaryWriter): summary writer from tensorboardX\n            step (int): the global step used for the x-axis of the plots\n            see lib.dtp_layers.DTPLayer.compute_gn_updates(...)\n            retain_graph (bool): flag indicating whether the graph of the\n                network should be retained after computing the gradients or\n                jacobians. If the graph will not be used anymore for the current\n                minibatch afterwards, retain_graph should be False.\n\n        \"\"\"\n        # if self.update_idx is None, the randomized setting is not used and\n        # all the layers have their parameters updated. 
The angle should thus\n # be computed for all layers\n if self.update_idx is None:\n layer_indices = range(len(self.layers))\n else:\n layer_indices = [self.update_idx]\n\n for i in layer_indices:\n name = 'layer {}'.format(i+1)\n if i != layer_indices[-1]: # if it is not the last index, the graph\n # should be saved for the next index\n retain_graph_flag = True\n else:\n retain_graph_flag = retain_graph\n angles = self.compute_gn_angles(output_activation, loss, damping,\n i, retain_graph_flag)\n writer.add_scalar(\n tag='{}/weight_gn_angle'.format(name),\n scalar_value=angles[0],\n global_step=step\n )\n\n if self._plots is not None:\n self.gn_angles.at[step, i] = angles[0].item()\n\n if self.layers[i].bias is not None:\n writer.add_scalar(\n tag='{}/bias_gn_angle'.format(name),\n scalar_value=angles[1],\n global_step=step\n )\n\n def save_gnt_angles(self, writer, step, output_activation, loss,\n damping, retain_graph=False, custom_result_df=None):\n # if self.update_idx is None, the randomized setting is not used and\n # all the layers have their parameters updated. The angle should thus\n # be computed for all layers\n # print('saving gnt angles')\n if self.update_idx is None:\n layer_indices = range(len(self.layers)-1)\n else:\n layer_indices = [self.update_idx]\n\n # assign a damping constant for each layer for computing the gnt angles\n if isinstance(damping, float):\n damping = [damping for i in range(self.depth)]\n else:\n # print(damping)\n # print(len(damping))\n # print(layer_indices)\n # print(len(layer_indices))\n assert len(damping) == len(layer_indices)\n\n for i in layer_indices:\n name = 'layer {}'.format(i + 1)\n if i != layer_indices[-1]: # if it is not the last index, the graph\n # should be saved for the next index\n retain_graph_flag = True\n else:\n retain_graph_flag = retain_graph\n angles = self.compute_gnt_angle(output_activation=output_activation,\n loss=loss,\n damping=damping[i],\n i=i,\n step=step,\n retain_graph=retain_graph_flag)\n if custom_result_df is not None:\n custom_result_df.at[step,i] = angles[0].item()\n else:\n writer.add_scalar(\n tag='{}/weight_gnt_angle'.format(name),\n scalar_value=angles[0],\n global_step=step\n )\n\n if self._plots is not None:\n # print('saving gnt angles')\n # print(angles[0].item())\n self.gnt_angles.at[step, i] = angles[0].item()\n\n if self.layers[i].bias is not None:\n writer.add_scalar(\n tag='{}/bias_gnt_angle'.format(name),\n scalar_value=angles[1],\n global_step=step\n )\n\n def save_nullspace_norm_ratio(self, writer, step, output_activation,\n retain_graph=False):\n if self.update_idx is None:\n layer_indices = range(len(self.layers))\n else:\n layer_indices = [self.update_idx]\n\n for i in layer_indices:\n name = 'layer {}'.format(i + 1)\n if i != layer_indices[-1]: # if it is not the last index, the graph\n # should be saved for the next index\n retain_graph_flag = True\n else:\n retain_graph_flag = retain_graph\n\n relative_norm = self.layers[i].compute_nullspace_relative_norm(\n output_activation,\n retain_graph=retain_graph_flag\n )\n\n writer.add_scalar(\n tag='{}/nullspace_relative_norm'.format(name),\n scalar_value=relative_norm,\n global_step=step\n )\n\n if self._plots is not None:\n self.nullspace_relative_norm.at[step, i] = relative_norm.item()\n\n\n def save_bp_activation_angle(self, writer, step, loss,\n retain_graph=False):\n \"\"\"\n Save the angle between the difference between the target and layer\n activation and the backpropagation update for the layers activation\n Args:\n writer 
(SummaryWriter): summary writer from tensorboardX\n step (int): the global step used for the x-axis of the plots\n see lib.dtp_layers.DTPLayer.compute_bp_activation_updates(...)\n retain_graph (bool): flag indicating whether the graph of the\n network should be retained after computing the gradients or\n jacobians. If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n \"\"\"\n # if self.update_idx is None, the randomized setting is not used and\n # all the layers have their parameters updated. The angle should thus\n # be computed for all layers\n if self.update_idx is None:\n layer_indices = range(len(self.layers))\n else:\n layer_indices = [self.update_idx]\n\n for i in layer_indices:\n name = 'layer {}'.format(i + 1)\n if i != layer_indices[-1]: # if it is not the last index, the graph\n # should be saved for the next index\n retain_graph_flag = True\n else:\n retain_graph_flag = retain_graph\n angle = self.compute_bp_activation_angle(loss, i,\n retain_graph_flag)\n\n\n writer.add_scalar(\n tag='{}/activation_bp_angle'.format(name),\n scalar_value=angle,\n global_step=step\n )\n if self._plots is not None:\n self.bp_activation_angles.at[step, i] = angle.item()\n return\n\n def save_gn_activation_angle(self, writer, step, output_activation, loss,\n damping, retain_graph=False):\n \"\"\"\n Save the angle between the difference between the target and layer\n activation and the gauss-newton update for the layers activation\n Args:\n writer (SummaryWriter): summary writer from tensorboardX\n step (int): the global step used for the x-axis of the plots\n see lib.dtp_layers.DTPLayer.compute_bp_activation_updates(...)\n retain_graph (bool): flag indicating whether the graph of the\n network should be retained after computing the gradients or\n jacobians. If the graph will not be used anymore for the current\n minibatch afterwards, retain_graph should be False.\n \"\"\"\n # if self.update_idx is None, the randomized setting is not used and\n # all the layers have their parameters updated. The angle should thus\n # be computed for all layers\n if self.update_idx is None:\n layer_indices = range(len(self.layers))\n else:\n layer_indices = [self.update_idx]\n\n for i in layer_indices:\n name = 'layer {}'.format(i + 1)\n if i != layer_indices[-1]: # if it is not the last index, the graph\n # should be saved for the next index\n retain_graph_flag = True\n else:\n retain_graph_flag = retain_graph\n angle = self.compute_gn_activation_angle(output_activation, loss,\n damping, i, step,\n retain_graph_flag)\n writer.add_scalar(\n tag='{}/activation_gn_angle'.format(name),\n scalar_value=angle,\n global_step=step\n )\n\n if self._plots is not None:\n self.gn_activation_angles.at[step, i] = angle.item()\n return\n\n\n def get_av_reconstruction_loss(self):\n \"\"\" Get the average reconstruction loss of the network across its layers\n for the current mini-batch.\n Args:\n network (networks.DTPNetwork): network\n Returns (torch.Tensor):\n Tensor containing a scalar of the average reconstruction loss\n \"\"\"\n reconstruction_losses = np.array([])\n\n for layer in self.layers[1:]:\n reconstruction_losses = np.append(reconstruction_losses,\n layer.reconstruction_loss)\n\n return np.mean(reconstruction_losses)\n\n\nclass LeeDTPNetwork(nn.Module):\n \"\"\" Class for the DTP network used in Lee2015 to classify MNIST digits. In\n this network, the target for the last hidden layer is computed by error\n backpropagation instead of DTP. 
\"\"\"\n\n def __init__(self, n_in, n_hidden, n_out, activation='tanh',\n output_activation='linear', bias=True, sigma=0.36,\n initialization='orthogonal',\n forward_requires_grad=False):\n \"\"\" See arguments of __init__ of class DTP Network\n\n Attributes:\n dtpnetwork (DTPNetwork): a DTP Network of all the layers except\n from the output\n layer. These layers will be trained by the DTP method.\n linearlayer (nn.Linear): the output linear layer. On this layer, the\n CrossEntropyLoss will be applied during training.\n hiddengradient: the gradient of the loss with respect to the\n activation of the last hidden layer of the network.\n depth (int): depth of the network (number of hidden layers + 1)\n initialization (str): the initialization method used for the forward\n and feedback matrices of the layers\n \"\"\"\n nn.Module.__init__(self)\n\n self._dtpnetwork = DTPNetwork(n_in, n_hidden[:-1], n_hidden[-1],\n activation=activation,\n output_activation=activation,\n bias=bias, sigma=sigma,\n initialization=initialization,\n forward_requires_grad=\n forward_requires_grad)\n\n self._linearlayer = nn.Linear(n_hidden[-1], n_out, bias=bias)\n if initialization == 'orthogonal':\n gain = np.sqrt(6./(n_hidden[-1] + n_out))\n nn.init.orthogonal_(self._linearlayer.weight, gain=gain)\n elif initialization == 'xavier':\n nn.init.xavier_uniform_(self._linearlayer.weight)\n else:\n raise ValueError('Given initialization \"{}\" is not supported.'\\\n .format(initialization))\n if bias:\n nn.init.constant_(self._linearlayer.bias, 0)\n self._depth = len(n_hidden) + 1\n\n if output_activation != 'linear':\n raise ValueError('{} is not supported as an output '\n 'activation'.format(output_activation))\n\n self._update_idx = None\n self._forward_requires_grad = forward_requires_grad\n\n @property\n def dtpnetwork(self):\n \"\"\" Getter for read-only attribute dtpnetwork\"\"\"\n return self._dtpnetwork\n\n @property\n def linearlayer(self):\n \"\"\" Getter for read-only attribute linearlayer\"\"\"\n return self._linearlayer\n\n @property\n def depth(self):\n \"\"\"Getter for read-only attribute :attr:`depth`.\"\"\"\n return self._depth\n\n @property\n def update_idx(self):\n \"\"\" Getter for attribute update_idx\"\"\"\n return self._update_idx\n\n @update_idx.setter\n def update_idx(self, value):\n \"\"\"Setter for attribute update_idx\"\"\"\n self._update_idx = value\n\n @property\n def forward_requires_grad(self):\n \"\"\" Getter for read-only attribute forward_requires_grad\"\"\"\n return self._forward_requires_grad\n\n @property\n def layers(self):\n \"\"\" Getter for attribute layers.\n Warning: only the layers of the dtp network are returned, not the\n extra linear layer output layer\"\"\"\n return self.dtpnetwork.layers\n\n def forward(self, x):\n x = self.dtpnetwork.forward(x)\n if x.requires_grad == False: # if statement is needed to be sure that\n # x is a leaf node. Otherwise, we are not\n # allowed to change the grad attribute.\n x.requires_grad = True\n x = self.linearlayer(x)\n #TODO: implement option for other activation functions besides linear\n return x\n\n def backward(self, loss, target_lr, save_target=False):\n \"\"\" Compute the gradients of the output weights and bias, compute\n the target for the last hidden layer based on backprop, propagate target\n backwards and compute parameter updates for the rest of the DTP network.\n \"\"\"\n\n # compute the gradients of the weights and bias of the output linear\n # layer. 
We cannot do this with loss.backward(), as then the\n        # gradients of all leaf nodes will be computed and stored in the .grad\n        # attributes of all layer parameters with requires_grad=True. We only\n        # need the gradients with respect to the last hidden layer and the\n        # weight and bias of the output linear layer.\n\n        gradients = torch.autograd.grad(loss, self.linearlayer.parameters(),\n                                        retain_graph=True)\n        for i, param in enumerate(self.linearlayer.parameters()):\n            param.grad = gradients[i].detach()\n\n        hidden_activations = self.dtpnetwork.layers[-1].activations\n        hiddengradient = torch.autograd.grad(loss, hidden_activations,\n                                             retain_graph=\n                                             self.forward_requires_grad)\n        hiddengradient = hiddengradient[0].detach()\n\n\n        hidden_targets = hidden_activations - target_lr*hiddengradient\n        self.dtpnetwork.backward_all(hidden_targets, save_target)\n\n    def compute_feedback_gradients(self):\n        \"\"\" Compute the local reconstruction loss for each layer of the\n        dtp network and compute the gradient of those losses with respect to\n        the feedback weights and biases. The gradients are saved in the\n        feedback parameter tensors.\"\"\"\n\n        self.dtpnetwork.compute_feedback_gradients()\n\n    def get_forward_parameter_list(self):\n        \"\"\"\n        Returns: a list with all the forward parameters (weights and biases) of\n            the network.\n\n        \"\"\"\n        parameterlist = self.dtpnetwork.get_forward_parameter_list()\n        parameterlist.append(self.linearlayer.weight)\n        if self.linearlayer.bias is not None:\n            parameterlist.append(self.linearlayer.bias)\n        return parameterlist\n\n    def get_feedback_parameter_list(self):\n        \"\"\"\n\n        Returns (list): a list with all the feedback parameters (weights and\n            biases) of the network. Note that the first hidden layer does not\n            need feedback parameters, so they are not put in the list.\n\n        \"\"\"\n        return self.dtpnetwork.get_feedback_parameter_list()\n\n    def get_reduced_forward_parameter_list(self):\n        \"\"\"\n        Get the forward parameters of all the layers that will be trained by\n        DTP, and not by BP (thus all the layer parameters except from the output\n        layer and the last hidden layer).\n        Returns: a list with all the parameters that will be trained by\n            difference target propagation\n\n        \"\"\"\n        if self.dtpnetwork.layers[-1].bias is not None:\n            return self.dtpnetwork.get_forward_parameter_list()[:-2]\n        else:\n            return self.dtpnetwork.get_forward_parameter_list()[:-1]\n\n    def save_logs(self, writer, step):\n        \"\"\" Save logs and plots for tensorboardX.\n\n        Args:\n            writer (SummaryWriter): summary writer from tensorboardX\n            step (int): the global step used for the x-axis of the plots\n        \"\"\"\n\n        self.dtpnetwork.save_logs(writer, step)\n\n        output_weights = self.linearlayer.weight\n        output_bias = self.linearlayer.bias\n\n        name = 'layer {}'.format(self.dtpnetwork.depth + 1)\n\n        forward_weights_norm = torch.norm(output_weights)\n        forward_bias_norm = torch.norm(output_bias)\n\n        forward_weights_gradients_norm = torch.norm(output_weights.grad)\n        forward_bias_gradients_norm = torch.norm(output_bias.grad)\n\n        writer.add_scalar(tag='{}/forward_weights_norm'.format(name),\n                          scalar_value=forward_weights_norm,\n                          global_step=step)\n        writer.add_scalar(tag='{}/forward_bias_norm'.format(name),\n                          scalar_value=forward_bias_norm,\n                          global_step=step)\n        writer.add_scalar(tag='{}/forward_weights_gradients_norm'.format(name),\n                          scalar_value=forward_weights_gradients_norm,\n                          global_step=step)\n        writer.add_scalar(tag='{}/forward_bias_gradients_norm'.format(name),\n                          scalar_value=forward_bias_gradients_norm,\n                          
global_step=step)\n\n def save_feedback_batch_logs(self, writer, step):\n \"\"\"\n Save the logs for the current minibatch on tensorboardX.\n Args:\n writer (SummaryWriter): summary writer from tensorboardX\n step (int): the global step used for the x-axis of the plots\n \"\"\"\n self.dtpnetwork.save_feedback_batch_logs(writer, step)\n\n def save_bp_angles(self, writer, step, loss, retain_graph=False):\n \"\"\"\n See DTPNetwork.save_bp_angles\n\n \"\"\"\n self.dtpnetwork.save_bp_angles(writer, step, loss, retain_graph)\n\n def save_gn_angles(self, writer, step, output_activation, loss, damping,\n retain_graph=False):\n \"\"\"\n See DTPNetwork.save_gn_angles\n\n \"\"\"\n self.dtpnetwork.save_gn_angles(writer, step, output_activation, loss,\n damping, retain_graph)\n\n def save_bp_activation_angle(self, writer, step, loss,\n retain_graph=False):\n \"\"\" See DTPNetwork.save_bp_activation_angle. \"\"\"\n self.dtpnetwork.save_bp_activation_angle(writer, step, loss,\n retain_graph)\n\n def save_gn_activation_angle(self, writer, step, output_activation, loss,\n damping, retain_graph=False):\n \"\"\" See DTPNetwork.save_gn_activation_angle. \"\"\"\n\n self.dtpnetwork.save_gn_activation_angle(writer, step,\n output_activation, loss,\n damping, retain_graph)\n\n def get_av_reconstruction_loss(self):\n \"\"\" Get the average reconstruction loss of the network across its layers\n for the current mini-batch.\n Args:\n network (networks.DTPNetwork): network\n Returns (torch.Tensor):\n Tensor containing a scalar of the average reconstruction loss\n \"\"\"\n return self.dtpnetwork.get_av_reconstruction_loss()\n\n\nclass DTPDRLNetwork(DTPNetwork):\n \"\"\"\n A class for networks that contain DTPDRLLayers.\n #FIXME: now the target for the nonlinear output is computed and the path\n is trained for propagating the nonlinear output target to the hidden\n layers. 
I think training will go better if you compute a target for the\n linear output activation and train the feedback path to map linear\n output targets towards the hidden layers.\n \"\"\"\n\n def set_layers(self, n_in, n_hidden, n_out, activation, output_activation,\n bias, forward_requires_grad, initialization,\n fb_activation):\n \"\"\"\n Create the layers of the network and output them as a ModuleList.\n Args:\n n_in: input dimension (flattened input assumed)\n n_hidden: list with hidden layer dimensions\n n_out: output dimension\n activation: activation function indicator for the hidden layers\n output_activation: activation function indicator for the output\n layer\n bias: boolean indicating whether the network uses biases or not\n forward_requires_grad (bool): Flag indicating whether the forward\n parameters require gradients that can be computed with autograd.\n This might be needed when comparing the DTP updates with BP updates\n and GN updates.\n initialization (str): the initialization method used for the forward\n and feedback matrices of the layers\n fb_activation (str): activation function indicator for the feedback\n path of the hidden layers\n\n \"\"\"\n n_all = [n_in] + n_hidden + [n_out]\n layers = nn.ModuleList()\n for i in range(1, len(n_all) - 1):\n layers.append(\n DTPDRLLayer(n_all[i - 1], n_all[i], bias=bias,\n forward_activation=activation,\n feedback_activation=fb_activation,\n forward_requires_grad=forward_requires_grad,\n initialization=initialization\n ))\n layers.append(DTPDRLLayer(n_all[-2], n_all[-1], bias=bias,\n forward_activation=output_activation,\n feedback_activation=fb_activation,\n forward_requires_grad=forward_requires_grad,\n initialization=initialization))\n return layers\n\n def compute_feedback_gradients(self, i):\n \"\"\"\n Compute the difference reconstruction loss for layer i of the network\n and compute the gradient of this loss with respect to the feedback\n parameters. 
The gradients are saved in the .grad attribute of the\n feedback parameter tensors.\n\n \"\"\"\n # save the index of the layer for which the reconstruction loss is\n # computed.\n self.reconstruction_loss_index = i\n\n h_corrupted = self.layers[i-1].activations + \\\n self.sigma * torch.randn_like(self.layers[i - 1].activations)\n output_corrupted = self.dummy_forward(h_corrupted, i-1)\n h_current_reconstructed = self.propagate_backward(output_corrupted, i)\n self.layers[i].compute_feedback_gradients(h_corrupted,\n h_current_reconstructed,\n self.layers[i-1].activations,\n self.sigma)\n\n def dummy_forward(self, h, i):\n \"\"\"\n Propagates the activations h of layer i forward to the output of the\n network, without saving activations and linear activations in the layer\n objects.\n Args:\n h (torch.Tensor): activations\n i (int): index of the layer of which h are the activations\n\n Returns: output of the network with h as activation for layer i\n\n \"\"\"\n y = h\n\n for layer in self.layers[i+1:]:\n y = layer.dummy_forward(y)\n\n return y\n\n def get_av_reconstruction_loss(self):\n \"\"\" Get the reconstruction loss of the network for the layer of which\n the feedback parameters were trained on the current mini-batch\n Returns (torch.Tensor):\n Tensor containing a scalar of the average reconstruction loss\n \"\"\"\n reconstruction_loss = self.layers[self.reconstruction_loss_index].\\\n reconstruction_loss\n return reconstruction_loss\n\n\nclass BPNetwork(nn.Module):\n\n def __init__(self, n_in, n_hidden, n_out, activation='relu',\n output_activation='linear', bias=True,\n initialization='orthogonal'):\n super().__init__()\n if n_hidden is None:\n n_all = [n_in, n_out]\n else:\n n_all = [n_in] + n_hidden + [n_out]\n self.layers = nn.ModuleList()\n for i in range(1, len(n_all)):\n layer = nn.Linear(n_all[i-1], n_all[i], bias=bias)\n if initialization == 'orthogonal':\n gain = np.sqrt(6. 
/ (n_all[i-1] + n_all[i]))\n nn.init.orthogonal_(layer.weight, gain=gain)\n elif initialization == 'xavier':\n nn.init.xavier_uniform_(layer.weight)\n elif initialization == 'xavier_normal':\n nn.init.xavier_normal_(layer.weight)\n elif initialization == 'teacher':\n nn.init.xavier_normal_(layer.weight, gain=3.)\n else:\n raise ValueError('Provided weight initialization \"{}\" is not '\n 'supported.'.format(initialization))\n if bias:\n nn.init.constant_(layer.bias, 0)\n\n self.layers.append(layer)\n self.activation = activation\n self.output_activation = output_activation\n\n @staticmethod\n def nonlinearity(x, nonlinearity):\n if nonlinearity == 'tanh':\n return torch.tanh(x)\n elif nonlinearity == 'relu':\n return F.relu(x)\n elif nonlinearity == 'linear':\n return x\n elif nonlinearity == 'leakyrelu':\n return F.leaky_relu(x, 0.2)\n elif nonlinearity == 'sigmoid':\n return torch.sigmoid(x)\n else:\n raise ValueError('The provided forward activation {} is not '\n 'supported'.format(nonlinearity))\n\n def forward(self, x):\n for layer in self.layers[:-1]:\n x = layer(x)\n x = self.nonlinearity(x, self.activation)\n\n x = self.layers[-1](x)\n x = self.nonlinearity(x, self.output_activation)\n return x\n\n def save_logs(self, writer, step):\n pass\n\n def set_requires_grad(self, value):\n \"\"\"\n Sets the 'requires_grad' attribute of the all the parameters\n to the given value\n Args:\n value (bool): True or False\n \"\"\"\n if not isinstance(value, bool):\n raise TypeError('The given value should be a boolean.')\n\n for param in self.parameters():\n param.requires_grad = value\n\n\nclass GNTNetwork(DTPDRLNetwork):\n \"\"\" Network that computes exact GN targets for the nonlinear hidden layer\n activations and computes parameter updates using a gradient step on the\n local loss.\"\"\"\n def __init__(self, n_in, n_hidden, n_out, activation='relu',\n output_activation='linear', bias=True, sigma=0.36,\n forward_requires_grad=False,\n initialization='orthogonal',\n fb_activation='relu',\n plots=None,\n damping=0.):\n super().__init__(n_in=n_in,\n n_hidden=n_hidden,\n n_out=n_out,\n activation=activation,\n output_activation=output_activation,\n bias=bias,\n sigma=sigma,\n forward_requires_grad=forward_requires_grad,\n initialization=initialization,\n fb_activation=fb_activation,\n plots=plots)\n self._damping = damping\n\n @property\n def damping(self):\n \"\"\" Getter for read only attr damping\"\"\"\n return self._damping\n\n def backward_random(self, loss, target_lr, i,\n norm_ratio=1., save_target=False):\n\n self.update_idx = i\n output_activation = self.layers[-1].activations\n\n layer_update = self.layers[i].compute_gn_activation_updates(\n output_activation, loss,\n damping=self.damping,\n retain_graph=self.forward_requires_grad,\n linear=False).detach()\n\n h_target = self.layers[i].activations - layer_update\n\n if save_target:\n self.layers[i].target = h_target\n\n if i == 0:\n self.layers[i].compute_forward_gradients(h_target, self.input,\n norm_ratio=norm_ratio)\n else:\n self.layers[i].compute_forward_gradients(h_target,\n self.layers[i-1].activations,\n norm_ratio=norm_ratio)\n\n def backward(self, loss, target_lr, save_target=False, norm_ratio=1.):\n output_activation = self.layers[-1].activations\n\n for i in range(self.depth):\n if i == self.depth -1:\n retain_graph = self.forward_requires_grad\n else:\n retain_graph = True\n\n layer_update = self.layers[i].compute_gn_activation_updates(\n output_activation, loss,\n damping=self.damping,\n retain_graph=retain_graph,\n 
linear=False).detach()\n\n h_target = self.layers[i].activations - layer_update\n if save_target:\n self.layers[i].target = h_target\n\n if i == 0:\n self.layers[i].compute_forward_gradients(h_target, self.input,\n norm_ratio=norm_ratio)\n else:\n self.layers[i].compute_forward_gradients(h_target,\n self.layers[\n i - 1].activations,\n norm_ratio=norm_ratio)\n\n def compute_feedback_gradients(self):\n pass\n\n\n\n" ]
[ [ "torch.randn_like", "torch.mean", "numpy.sqrt", "torch.tanh", "numpy.mean", "torch.norm", "torch.nn.functional.relu", "torch.autograd.grad", "torch.sigmoid", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "numpy.append", "torch.nn.functional.leaky_relu", "numpy.array", "torch.nn.Module.__init__", "torch.nn.init.orthogonal_", "torch.nn.init.xavier_uniform_" ] ]
GiftofHermes/queue_simulation
[ "aeb7ef818e1b2671c55d33179a6dc72e67976351" ]
[ "simulation.py" ]
[ "from abc import ABC\r\nimport numpy as np\r\nfrom collections import deque\r\nimport heapq\r\n\r\nclass Simulation:\r\n \"\"\"\r\n Fill here according to python guidelines\r\n \"\"\"\r\n def __init__(self):\r\n self.event_id = 0\r\n self.time = 0.0\r\n self.event_queue = [] #should be a heapq\r\n self.num_events = 0\r\n self.total_wait_time = 0\r\n self.max_wait_time = 0\r\n self.queue_length = 0\r\n self.total_arrival = 0\r\n self.total_served = 0\r\n\r\n\r\n\r\n def handle_event(self, event):\r\n event = event[1]\r\n if event.type == 'arrival':\r\n self.total_arrival += 1\r\n self.queue_length += 1\r\n if self.event_queue:\r\n max_end_time_event = heapq.nlargest(1,\r\n self.event_queue,\r\n key=lambda x: x[1].end_time)\r\n start_time = max_end_time_event[0][1].end_time\r\n else:\r\n start_time = self.time\r\n service = (Service(id=self.event_id,\r\n event_time=self.time,\r\n start_time=start_time,\r\n duration=lambda: np.random.randint(6,10))\r\n )\r\n heapq.heappush(self.event_queue, (service.end_time, service))\r\n self.event_id += 1\r\n if event.type == 'service':\r\n self.queue_length -=1\r\n self.total_wait_time += self.time - event.event_time\r\n if self.max_wait_time < self.time - event.event_time:\r\n self.max_wait_time = self.time - event.event_time\r\n self.time += event.duration\r\n self.total_served += 1\r\n return self\r\n\r\n def __str__(self):\r\n return (f'Time: {self.time}\\n'\r\n f'queue_length: {self.queue_length}\\n'\r\n f'wait_time: {self.total_wait_time}\\n'\r\n f'average_wait_time: {self.total_wait_time / self.total_arrival}\\n'\r\n f'max_wait_time: {self.max_wait_time}'\r\n )\r\n\r\nclass Event(ABC):\r\n \"\"\"\r\n Abstract event class\r\n \"\"\"\r\n type: str = NotImplemented\r\n\r\n def __init__(self, id, event_time, start_time, duration=lambda: 0):\r\n self.id = id\r\n self.event_time = event_time\r\n self.start_time = start_time\r\n self.duration = duration() # may change to a function\r\n self.end_time = self.start_time + self.duration\r\n pass\r\n\r\n def __lt__(self, other):\r\n return self.end_time < other.end_time\r\n\r\n def __eq__(self, other):\r\n return self.end_time == other.end_time\r\n\r\n def __gt__(self, other):\r\n return self.end_time > other.end_time\r\n\r\nclass Arrival(Event):\r\n type = 'arrival'\r\n\r\nclass Service(Event):\r\n type = 'service'\r\n" ]
[ [ "numpy.random.randint" ] ]
jayanthkoushik/cmu-ammml-project
[ "d087924b64f91754addb19ef251b5dececb672e3" ]
[ "src/uniimg.py" ]
[ "# coding=utf-8\n# uniimg.py: unimodal image based classifier.\n\nfrom __future__ import print_function\nimport argparse\nimport sys\nimport os\nimport random\nimport glob\nimport cPickle\nimport math\nfrom datetime import datetime\n\nimport numpy as np\nfrom models.vgg16 import VGG16\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom scipy.misc import imread\n\n\nSPLIT_DIR = \"data/perssplit\"\nPICKLED_LABEL_FILE = \"data/labels.pickle\"\nPERS_FIELD_NAME = \"Answer.q7_persuasive\"\nGRAD_CLIP = 3\nDEFAULT_LEARNING_RATE = 0.0001\nDEFAULT_EPOCHS = 1\nDEFAULT_BATCH_SIZE = 100\n\nwith open(PICKLED_LABEL_FILE, \"rb\") as lf:\n labels_map = cPickle.load(lf)\n\n\ndef generate_batch(batch_ims):\n \"\"\"Generate a batch (X, y) from a list of images.\"\"\"\n batch_X = np.zeros((len(batch_ims), 3, 224, 224))\n batch_y = np.zeros((len(batch_ims), 1))\n for i, im_file in enumerate(batch_ims):\n img = imread(im_file).astype(\"float32\")\n img[:, :, 0] -= 103.939\n img[:, :, 1] -= 116.779\n img[:, :, 2] -= 123.68\n img = img.transpose((2, 0, 1))\n batch_X[i, :, :, :] = img\n\n file_id = im_file.split(\"/\")[-1].split(\"_\")[0]\n score = labels_map[file_id][PERS_FIELD_NAME]\n if score >= 5.5:\n batch_y[i] = 1\n return (batch_X, batch_y)\n\n\nclass RandomBatchGenerator(object):\n\n \"\"\"Generate random batches of data.\"\"\"\n\n def __init__(self, batch_size, typ, imdir, augment):\n # typ should be \"train\", \"val\", or \"test\".\n self._batch_size = batch_size\n self._ims = []\n self._idx = 0\n if augment is True:\n self._datagen = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=30,\n width_shift_range=0.25,\n height_shift_range=0.25,\n shear_range=0.1,\n horizontal_flip=True,\n vertical_flip=True\n )\n else:\n self._datagen = None\n vids_file = os.path.join(SPLIT_DIR, \"{}.txt\".format(typ))\n with open(vids_file) as vf:\n for line in vf:\n vid_ims = os.path.join(imdir, line.strip(), \"*\")\n self._ims.extend(glob.glob(vid_ims))\n\n def __iter__(self):\n return self\n\n def next(self):\n batch_ims = random.sample(self._ims, self._batch_size)\n batch_X, batch_y = generate_batch(batch_ims)\n if self._datagen is None:\n return batch_X, batch_y\n else:\n return next(self._datagen.flow(\n X=batch_X,\n y=batch_y,\n batch_size=self._batch_size,\n shuffle=False\n ))\n\n\nclass VidBatchGenerator(object):\n\n \"\"\"Generate batches of data corresponding to a video.\"\"\"\n\n def __init__(self, batch_size, vid, imdir):\n self._batch_size = batch_size\n self._idx = 0\n vid_ims = os.path.join(imdir, vid, \"*\")\n self._ims = glob.glob(vid_ims)\n\n def __iter__(self):\n return self\n\n def next(self):\n if self._idx >= len(self._ims):\n self._idx = 0\n batch_ims = self._ims[self._idx:self._idx+self._batch_size]\n self._idx = self._idx + self._batch_size\n return generate_batch(batch_ims)\n\n\ndef eval_model_vid(model, batch_size, vid, imdir):\n \"\"\"Evaluate a model on a single video.\"\"\"\n vid_batch_generator = VidBatchGenerator(batch_size, vid, imdir)\n num_ims = len(vid_batch_generator._ims)\n l, acc = model.evaluate_generator(\n generator=vid_batch_generator,\n val_samples=num_ims,\n show_accuracy=True,\n verbose=1\n )\n return l, acc, num_ims\n\n\ndef eval_model(model, batch_size, typ, imdir):\n \"\"\"Evaluate a model. 
\"typ\" should be \"train\", \"val\", or \"test\".\"\"\"\n vids_file = os.path.join(SPLIT_DIR, \"{}.txt\".format(typ))\n total_vids = 0\n correct_vids = 0\n total_ims = 0\n correct_ims = 0\n with open(vids_file) as vf:\n for line in vf:\n _, acc, num_ims = eval_model_vid(model, batch_size, line.strip(), imdir)\n total_vids += 1\n if acc >= 0.5:\n correct_vids += 1\n total_ims += num_ims\n correct_ims += math.floor(acc * num_ims)\n vid_acc = float(correct_vids) / total_vids\n im_acc = float(correct_ims) / total_ims\n return vid_acc, im_acc\n\n\nif __name__==\"__main__\":\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"--imdir\", type=str, required=True)\n arg_parser.add_argument(\"--vgg-weights\", type=str, required=True)\n arg_parser.add_argument(\"--save-path\", type=str, required=True)\n arg_parser.add_argument(\"--lr\", type=float, default=DEFAULT_LEARNING_RATE)\n arg_parser.add_argument(\"--epochs\", type=int, default=DEFAULT_EPOCHS)\n arg_parser.add_argument(\"--batch-size\", type=int, default=DEFAULT_BATCH_SIZE)\n arg_parser.add_argument(\"--train\", type=str, choices=[\"true\", \"false\"],\n required=True)\n arg_parser.add_argument(\"--default-arch-weights\", type=str,\n choices=[\"true\", \"false\"], required=True)\n arg_parser.add_argument(\"--augment\", type=str,\n choices=[\"true\", \"false\"], required=True)\n args = arg_parser.parse_args()\n\n print(\"Building model...\", end=\"\")\n sys.stdout.flush()\n default_arch_weights = args.default_arch_weights == \"true\"\n model = VGG16(args.vgg_weights, default_arch_weights)\n model.compile(optimizer=Adam(lr=args.lr, clipvalue=GRAD_CLIP),\n loss=\"binary_crossentropy\",\n class_mode=\"binary\")\n print(\"done\")\n\n if args.train == \"true\":\n date = str(datetime.now().date())\n args.save_path = os.path.join(args.save_path, date)\n os.makedirs(args.save_path)\n\n train_generator = RandomBatchGenerator(args.batch_size, \"train\",\n args.imdir, args.augment==\"true\")\n val_generator = RandomBatchGenerator(args.batch_size, \"val\",\n args.imdir, args.augment==\"true\")\n ckpt_clbk = ModelCheckpoint(\n filepath=os.path.join(args.save_path, \"checkpoint.h5\"),\n verbose=1,\n save_best_only=False\n )\n history = model.fit_generator(\n generator=train_generator,\n samples_per_epoch=len(train_generator._ims),\n nb_epoch=args.epochs,\n verbose=1,\n show_accuracy=True,\n callbacks=[ckpt_clbk],\n validation_data=val_generator,\n nb_val_samples=len(val_generator._ims) // 4,\n nb_worker=1\n )\n\n train_vid_acc, train_im_acc = eval_model(model, args.batch_size, \"train\",\n args.imdir)\n val_vid_acc, val_im_acc = eval_model(model, args.batch_size, \"val\", args.imdir)\n print(\"Training: video acc.: {}, image acc.: {}\".format(train_vid_acc,\n train_im_acc))\n print(\"Validation: video acc.: {}, image acc.: {}\".format(val_vid_acc,\n val_im_acc))\n\n if args.train == \"true\":\n print(\"Saving...\", end=\"\")\n sys.stdout.flush()\n model.save_weights(os.path.join(args.save_path, \"weights.h5\"),\n overwrite=True)\n print(\"\\n\".join(map(str, history.history[\"acc\"])),\n file=open(os.path.join(args.save_path, \"accs.txt\"), \"w\"))\n print(\"\\n\".join(map(str, history.history[\"loss\"])),\n file=open(os.path.join(args.save_path, \"losses.txt\"), \"w\"))\n summary = {\n \"learning_rate\": args.lr,\n \"epochs\": args.epochs,\n \"batch_size\": args.batch_size,\n \"train_vid_acc\": train_vid_acc,\n \"train_im_acc\": train_im_acc,\n \"val_vid_acc\": val_vid_acc,\n \"val_im_acc\": val_im_acc\n }\n print(summary, 
file=open(os.path.join(args.save_path, \"summary.txt\"), \"w\"))\n print(\"done.\")\n\n" ]
[ [ "scipy.misc.imread" ] ]
ajmaurais/peptide_analyzer
[ "62f37d88fefd0a8cfb57a8c157cfc85692956360" ]
[ "complete/main.py" ]
[ "\nimport os\nimport argparse\nimport pandas as pd\n\nimport uniprot\nimport molecular_formula\n\n\n# def main():\n# '''\n# Main method.\n#\n# This is the function which will be executed first when you call the program form the command line.\n# '''\n#\n# # Load and parse command line arguments\n# parser = argparse.ArgumentParser(prog=__file__,\n# description='Read a .tsv file containing peptides and UniProt '\n# 'IDs and do some analysis on them.')\n# parser.add_argument('atom_count_table',\n# help='Path to table to look up numbers and types of atoms in each residue.')\n# parser.add_argument('fasta_path', help='Path to fasta file to look up sequences.')\n# parser.add_argument('input_file',\n# help='Path to input file. Should be tsv with two columns; \"ID\" and \"seq\".')\n# args = parser.parse_args()\n\n# Hard coded file paths for debugging\nBASE_DATA_PATH = os.path.abspath(os.path.dirname(__file__) + '/../data')\nINPUT_FILE_PATH = BASE_DATA_PATH + '/input_peptides.tsv'\nATOM_COUNT_TABLE_PATH = BASE_DATA_PATH + '/residue_atoms.txt'\nOUTPUT_FILE_PATH = BASE_DATA_PATH + '/output_peptides.tsv'\n\n# read input file at INPUT_FILE_PATH\n# (Call pd.read_csv and set the output to a variable named 'dat')\ndat = pd.read_csv(INPUT_FILE_PATH, sep='\\t')\n\n# Load atom count table from ATOM_COUNT_TABLE_PATH and save it to a variable.\n# (Call molecular_formula.read_atom_count_table)\natom_counts = molecular_formula.read_atom_count_table(ATOM_COUNT_TABLE_PATH)\n\n# Make a list of Counter(s) for each peptide sequence.\n# (Call molecular_formula.calc_formula for each peptide sequence.)\nformulas = [molecular_formula.calc_formula(seq, atom_counts) for seq in dat['seq']]\n\n# Add a column to dat containing the peptide formula\n# (Call molecular_formula.format_formula for each peptide sequence and set the result\n# to a new column in dat named 'formula'.)\ndat['formula'] = [molecular_formula.format_formula(f) for f in formulas]\n\n# Add a column to dat containing the peptide mass\n# (Call molecular_formula.calc_mass for each peptide sequence and set the result\n# to a new column in dat named 'mass'.)\ndat['mass'] = [molecular_formula.calc_mass(f) for f in formulas]\n\n# Retrieve uniprot records for all protein IDs\n# (Call uniprot.get_unipror_record for each protein ID and save the results to a dict)\nrecords = {uniprot_id: uniprot.get_unipror_record(uniprot_id) for uniprot_id in dat['ID']}\n\n# Iterate through dat\nmod_locs = list()\ngo_fxn = list()\nlocations = list()\nfor i, row in dat.iterrows():\n # Check if there is a uniprot record for the current ID.\n if records[row['ID']] is not None:\n # Get the protein sequence for the current row from the Record\n protein_seq = records[row['ID']].sequence\n\n # Get the index locations of the modification(s) in the parent protein sequence.\n # (Call uniprot.get_modified_residue_numbers)\n mod_locs.append(uniprot.get_modified_residue_numbers(row['seq'], protein_seq))\n\n\n # Get the GO terms for protein function\n # (Call uniprot.get_go_fxn)\n go_fxn.append(uniprot.get_go_fxn(records[row['ID']].cross_references))\n\n # Get the subcellular location of the parent protein\n # The subcellular location are stored in the Record.comments member\n locations.append(uniprot.get_protein_location(records[row['ID']].comments))\n\n else:\n mod_locs.append('UNIPROT_RECORD_NOT_FOUND')\n locations.append('-')\n go_fxn.append('-')\n\n# Add a column to dat named 'modified_residues'\ndat['modified_residues'] = mod_locs\n# Add a column to dat named 'go_fxn'\ndat['go_fxn'] = 
go_fxn\n# Add a column to dat named 'subcelluar_loc'\ndat['subcelluar_loc'] = locations\n\n# Write dat to OUTPUT_FILE_PATH (Call DataFrame.to_tsv)\ndat.to_csv(OUTPUT_FILE_PATH, sep='\\t', index=False)\n\n# if __name__ == '__main__':\n# main()\n\n" ]
[ [ "pandas.read_csv" ] ]
vishalbelsare/h2o-3
[ "9322fb0f4c0e2358449e339a434f607d524c69fa" ]
[ "h2o-py/tests/testdir_sklearn/pyunit_sklearn_classification_all_estimators.py" ]
[ "from __future__ import print_function\nfrom collections import defaultdict\nfrom functools import partial\nimport gc, inspect, os, sys\n\nimport numpy as np\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nimport h2o\nfrom h2o.sklearn.wrapper import H2OConnectionMonitorMixin\n\n\nsys.path.insert(1, os.path.join(\"..\",\"..\"))\nfrom tests import pyunit_utils, Namespace as ns\n\n\n\"\"\"\nThis test suite creates a default sklearn classification estimator for each H2O estimator.\nThen, it feeds them with H2O frames (more efficient and ensures compatibility with old API.)\nor with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.\n\"\"\"\n\nseed = 2019\ninit_connection_args = dict(strict_version_check=False, show_progress=True)\n\nscores = defaultdict(dict)\n\n\ndef _get_data(format='numpy', n_classes=2):\n X, y = make_classification(n_samples=100, n_features=10, n_informative=5, n_classes=n_classes, random_state=seed)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)\n data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n if format == 'h2o':\n for k, v in data.__dict__.items():\n setattr(data, k, h2o.H2OFrame(v))\n return data\n\n\ndef _get_default_args(estimator_cls):\n defaults = dict(\n H2OCoxProportionalHazardsClassifier=dict(),\n H2ODeepLearningClassifier=dict(seed=seed, reproducible=True),\n H2OGeneralizedAdditiveClassifier=dict(family='binomial', seed=seed, gam_columns=[\"C1\"]),\n H2OGeneralizedLinearClassifier=dict(family='binomial', seed=seed),\n )\n return defaults.get(estimator_cls.__name__, dict(seed=seed))\n\n\ndef _get_custom_behaviour(estimator_cls):\n custom = dict(\n # H2ODeepLearningClassifier=dict(scores_may_differ=True),\n )\n return custom.get(estimator_cls.__name__, dict())\n\n\ndef test_estimator_with_h2o_frames(estimator_cls):\n args = _get_default_args(estimator_cls)\n estimator = estimator_cls(**args)\n\n data = _get_data(format='h2o', n_classes=2)\n assert isinstance(data.X_train, h2o.H2OFrame)\n estimator.fit(data.X_train, data.y_train)\n preds = estimator.predict(data.X_test)\n print(preds)\n assert isinstance(preds, h2o.H2OFrame)\n if _get_custom_behaviour(estimator_cls).get('preds_as_vector', True):\n assert preds.dim == [len(data.X_test), 1], \"got {}\".format(preds.dim)\n else:\n assert preds.dim[0] == len(data.X_test)\n\n probs = estimator.predict_proba(data.X_test)\n print(probs)\n assert probs.dim == [len(data.X_test), 2], \"got {}\".format(probs.dim)\n assert np.allclose(np.sum(probs.as_data_frame().values, axis=1), 1.), \"`predict_proba` didn't return probabilities\"\n\n score = estimator.score(data.X_test, data.y_test)\n assert isinstance(score, float)\n skl_score = accuracy_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)\n assert abs(score - skl_score) < 1e-6, \"score={}, skl_score={}\".format(score, skl_score)\n scores[estimator_cls].update(with_h2o_frames=score)\n\n\ndef test_estimator_with_numpy_arrays(estimator_cls):\n estimator = estimator_cls(init_connection_args=init_connection_args, **_get_default_args(estimator_cls))\n\n data = _get_data(format='numpy', n_classes=2)\n assert isinstance(data.X_train, np.ndarray)\n\n with estimator:\n estimator.fit(data.X_train, data.y_train)\n preds = estimator.predict(data.X_test)\n print(preds)\n assert isinstance(preds, np.ndarray)\n if 
_get_custom_behaviour(estimator_cls).get('preds_as_vector', True):\n assert preds.shape == (len(data.X_test),), \"got {}\".format(preds.shape)\n else:\n assert preds.shape[0] == len(data.X_test)\n\n probs = estimator.predict_proba(data.X_test)\n print(probs)\n assert probs.shape == (len(data.X_test), 2)\n assert np.allclose(np.sum(probs, axis=1), 1.), \"`predict_proba` didn't return probabilities\"\n\n score = estimator.score(data.X_test, data.y_test)\n assert isinstance(score, float)\n skl_score = accuracy_score(data.y_test, preds)\n assert abs(score - skl_score) < 1e-6\n scores[estimator_cls].update(with_numpy_arrays=score)\n\n\ndef test_scores_are_equivalent(estimator_cls):\n try:\n lk, rk = ('with_h2o_frames', 'with_numpy_arrays')\n est_scores = scores[estimator_cls]\n if lk in est_scores and rk in est_scores:\n assert abs(est_scores[lk] - abs(est_scores[rk])) < 1e-6, \\\n \"expected equivalent scores but got {lk}={lscore} and {rk}={rscore}\" \\\n .format(lk=lk, rk=rk, lscore=est_scores[lk], rscore=est_scores[rk])\n elif lk not in est_scores:\n print(\"no scores for {}\".format(estimator_cls.__name__+' '+lk))\n else:\n print(\"no scores for {}\".format(estimator_cls.__name__+' '+rk))\n except AssertionError as e:\n if _get_custom_behaviour(estimator_cls).get('scores_may_differ', False):\n print(\"ERROR !!! \"+str(e))\n else:\n raise e\n\n\ndef make_test(test, classifier):\n bound_test = partial(test, classifier)\n bound_test.__name__ = test.__name__\n pyunit_utils.tag_test(bound_test, classifier.__name__)\n return bound_test\n\n\ndef make_tests(classifier):\n return list(map(lambda test: make_test(test, classifier), [\n test_estimator_with_h2o_frames,\n test_estimator_with_numpy_arrays,\n test_scores_are_equivalent\n ]))\n\n\nfailing = [\n 'H2OStackedEnsembleClassifier', 'H2OUpliftRandomForestClassifier' # needs a separate test (requires models as parameters)\n]\nclassifiers = [cls for name, cls in inspect.getmembers(h2o.sklearn, inspect.isclass)\n if name.endswith('Classifier') and name not in ['H2OAutoMLClassifier']+failing]\npyunit_utils.run_tests([make_tests(c) for c in classifiers])\n" ]
[ [ "numpy.sum", "sklearn.datasets.make_classification", "sklearn.model_selection.train_test_split", "sklearn.metrics.accuracy_score" ] ]
aalto-ics-kepaco/lcms2struct_exp
[ "5baa3edd0e58d24f739efd4086031f6fbdba6ad9" ]
[ "data/import_sirius_scores.py" ]
[ "####\n#\n# The MIT License (MIT)\n#\n# Copyright 2021 Eric Bach <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is furnished\n# to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n####\nimport sqlite3\nimport argparse\nimport tarfile\nimport os\nimport pandas as pd\nimport re\nimport glob\nimport logging\nimport traceback\nimport numpy as np\n\nfrom massbank2db.db import MassbankDB\n\n# ================\n# Setup the Logger\nLOGGER = logging.getLogger(\"Import CSI:FingerID Scores\")\nLOGGER.setLevel(logging.WARNING)\nLOGGER.propagate = False\n\nCH = logging.StreamHandler()\nCH.setLevel(logging.WARNING)\n\nFORMATTER = logging.Formatter('[%(levelname)s] %(name)s : %(message)s')\nCH.setFormatter(FORMATTER)\n\nLOGGER.addHandler(CH)\n# ================\n\nDISCONNECTED_PATTER = re.compile(r\"^[0-9]+.*\")\nPREDIX_PATTERN = re.compile(r\"[A-Z]{2,3}\")\n\n\ndef create_tables(conn: sqlite3.Connection):\n conn.execute(\"CREATE TABLE IF NOT EXISTS scored_spectra_meta (\"\n \" accession VARCHAR PRIMARY KEY,\"\n \" dataset VARCHAR NOT NULL,\" \n \" original_accessions VARCHAR NOT NULL,\"\n \" precursor_mz REAL NOT NULL,\"\n \" precursor_type VARCHAR NOT NULL,\"\n \" molecule INTEGER NOT NULL,\"\n \" retention_time REAL NOT NULL,\"\n \" FOREIGN KEY (molecule) REFERENCES molecules(cid) ON DELETE CASCADE,\"\n \" FOREIGN KEY (dataset) REFERENCES datasets(name) ON DELETE CASCADE)\")\n\n conn.execute(\"CREATE TABLE IF NOT EXISTS candidates_spectra (\"\n \" spectrum VARCHAR NOT NULL,\"\n \" candidate INT NOT NULL,\"\n \" FOREIGN KEY (candidate) REFERENCES molecules(cid) ON DELETE CASCADE,\"\n \" FOREIGN KEY (spectrum) REFERENCES scored_spectra_meta(accession) ON DELETE CASCADE,\"\n \" CONSTRAINT spectrum_candidate_combination UNIQUE (spectrum, candidate))\")\n\n conn.execute(\"CREATE TABLE IF NOT EXISTS merged_accessions (\"\n \" massbank_accession VARCHAR PRIMARY KEY,\"\n \" merged_accession VARCHAR NOT NULL,\"\n \" FOREIGN KEY (massbank_accession) REFERENCES spectra_meta (accession) ON DELETE CASCADE,\"\n \" FOREIGN KEY (merged_accession) REFERENCES scored_spectra_meta(accession) ON DELETE CASCADE)\")\n\n conn.execute(\"CREATE TABLE IF NOT EXISTS scoring_methods (\"\n \" name VARCHAR PRIMARY KEY,\"\n \" method VARCHAR NOT NULL,\"\n \" description VARCHAR)\")\n\n conn.execute(\"CREATE TABLE IF NOT EXISTS spectra_candidate_scores (\"\n \" spectrum VARCHAR NOT NULL,\"\n \" candidate INTEGER NOT NULL,\"\n \" scoring_method VARCHAR NOT NULL,\"\n \" dataset VARCHAR NOT NULL,\"\n \" score REAL NOT NULL,\"\n \" FOREIGN KEY (spectrum) REFERENCES 
scored_spectra_meta(accession) ON DELETE CASCADE,\"\n \" FOREIGN KEY (candidate) REFERENCES molecules(cid) ON DELETE CASCADE,\"\n \" FOREIGN KEY (dataset) REFERENCES datasets(name) ON DELETE CASCADE,\"\n \" FOREIGN KEY (scoring_method) REFERENCES scoring_methods(name) ON DELETE CASCADE,\"\n \" PRIMARY KEY (spectrum, candidate, scoring_method))\")\n\n conn.execute(\"CREATE TABLE IF NOT EXISTS fingerprints_meta (\"\n \" name VARCHAR PRIMARY KEY,\"\n \" type VARCHAR NOT NULL,\"\n \" mode VARCHAR NOT NULL,\"\n \" param VARCHAR,\"\n \" timestamp VARCHAR NOT NULL,\"\n \" library VARCHAR NOT NULL,\"\n \" length INTEGER NOT NULL,\"\n \" is_folded BOOLEAN NOT NULL,\"\n \" hash_keys VARCHAR,\"\n \" max_values VARCHAR,\"\n \" CONSTRAINT type_mode_combination UNIQUE (type, mode, param))\")\n\n if args.include_sirius_fps:\n conn.execute(\"CREATE TABLE IF NOT EXISTS fingerprints_data__sirius_fps (\"\n \" molecule INTEGER NOT NULL PRIMARY KEY,\"\n \" bits VARCHAR NOT NULL,\"\n \" vals VARCHAR,\"\n \" FOREIGN KEY (molecule) REFERENCES molecules(cid) ON DELETE CASCADE)\")\n\n\ndef create_indices(conn: sqlite3.Connection):\n conn.execute(\"CREATE INDEX IF NOT EXISTS scc__spectrum ON spectra_candidate_scores(spectrum)\")\n conn.execute(\"CREATE INDEX IF NOT EXISTS scc__molecule ON spectra_candidate_scores(candidate)\")\n conn.execute(\"CREATE INDEX IF NOT EXISTS scc__scoring_method ON spectra_candidate_scores(scoring_method)\")\n conn.execute(\"CREATE INDEX IF NOT EXISTS scc__dataset ON spectra_candidate_scores(dataset)\")\n\n conn.execute(\"CREATE INDEX IF NOT EXISTS ma__merged_accession ON merged_accessions(merged_accession)\")\n\n conn.execute(\"CREATE INDEX IF NOT EXISTS cs__spectrum ON candidates_spectra(spectrum)\")\n\n if args.include_sirius_fps:\n conn.execute(\"CREATE INDEX IF NOT EXISTS fpd__molecule__sirius_fps ON fingerprints_data__sirius_fps(molecule)\")\n\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n \"massbank_db_fn\",\n help=\"Pathname of the Massbank database used as a basis to construct the new DB.\"\n )\n arg_parser.add_argument(\"idir\")\n arg_parser.add_argument(\"score_tgz_fn\")\n arg_parser.add_argument(\n \"--pubchem_db_fn\",\n default=\"/home/bach/Documents/doctoral/projects/local_pubchem_db/db_files/pubchem_01-02-2021.sqlite\",\n type=str,\n help=\"Filepath of the PubChem database.\")\n arg_parser.add_argument(\n \"--build_unittest_db\",\n action=\"store_true\",\n help=\"Use this option to create a smaller database (subset of SIRIUS scores) that we can use for unittests.\"\n )\n arg_parser.add_argument(\n \"--include_sirius_fps\",\n action=\"store_true\",\n help=\"Should SIRIUS fingerprints be added to the database\"\n )\n arg_parser.add_argument(\n \"--acc_to_be_removed_fn\",\n type=str,\n help=\"List of grouped accession ids to be removed.\",\n default=\"./grouped_accessions_to_be_removed.txt\"\n )\n args = arg_parser.parse_args()\n\n sqlite3.register_adapter(np.int64, int)\n\n # Copy the base DB to a new file (which will get the SIRIUS scores)\n conn_mb_original = sqlite3.connect(\"file:\" + args.massbank_db_fn + \"?mode=ro\", uri=True)\n conn_mb = sqlite3.connect(\n os.path.join(\n os.path.dirname(args.massbank_db_fn),\n \"Massbank_test_db.sqlite\" if args.build_unittest_db else \"massbank__with_sirius.sqlite\"\n )\n )\n try:\n with conn_mb:\n conn_mb_original.backup(conn_mb)\n finally:\n conn_mb_original.close()\n\n conn_pc = sqlite3.connect(args.pubchem_db_fn)\n\n # Load the grouped accession IDs which should be 
removed.\n with open(args.acc_to_be_removed_fn, \"r\") as acc_to_be_removed_file:\n acc_to_be_removed = set(l.strip() for l in acc_to_be_removed_file.readlines())\n\n try:\n with conn_mb:\n conn_mb.execute(\"PRAGMA foreign_keys = ON\")\n\n with conn_mb:\n create_tables(conn_mb)\n\n if args.include_sirius_fps:\n conn_mb.execute(\"INSERT OR REPLACE INTO fingerprints_meta \"\n \" VALUES (?, ?, ?, ?, DATETIME('now', 'localtime'), ?, ?, ?, ?, ?)\",\n (\"sirius_fps\", \"UNION\", \"binary\", None, \"CDK: None\", 3047, False, None, None))\n\n conn_mb.execute(\"INSERT OR REPLACE INTO scoring_methods VALUES (?, ?, ?)\",\n (\"sirius__norm\", \"SIRIUS: 4, CSI:FingerID: 1.4.5\",\n \"Dr. Kai Dührkop ran CSI:FingerID to predict the candidate scores in a structure disjoint \"\n \"(sd) fashion. That means, the ground truth molecular structured associated with the \"\n \"Massbank spectra have not been used for training. The correct molecular formula was used \"\n \"to construct the candidate sets. The scores are normalized to range [0, 1].\"))\n\n spectra_idx = 0\n with tarfile.open(os.path.join(args.idir, args.score_tgz_fn), \"r:gz\") as score_archive:\n for entry in score_archive:\n if entry.isreg():\n # Found a spectrum ...\n spectra_idx += 1\n spec_id = os.path.basename(entry.name).split(os.path.extsep)[0] # e.g. AC01111385\n LOGGER.info(\"Process spectrum %05d: id = %s (out of 8537)\" % (spectra_idx, spec_id))\n\n if spec_id in acc_to_be_removed:\n LOGGER.warning(\"We remove id = '%s'. See pull request #152 in the MassBank repository.\")\n continue\n\n # For unittest DBs we only import a sub-set of the spectra\n if args.build_unittest_db and np.random.RandomState(spectra_idx).rand() > 0.005:\n continue\n\n # ==================================\n # Find meta-information for spectrum\n meta_info = None\n\n ds_pref = re.findall(PREDIX_PATTERN, spec_id)[0] # e.g. AC\n for d in glob.iglob(os.path.join(args.idir, \"%s*\" % ds_pref)):\n _df = pd.read_csv(os.path.join(d, \"spectra_summary.tsv\"), sep=\"\\t\")\n try:\n row_idx = _df[\"accession\"].to_list().index(spec_id)\n meta_info = _df.iloc[row_idx, :].copy()\n meta_info[\"dataset\"] = os.path.basename(d) # e.g. 
AC_003\n break\n except ValueError:\n # Spectrum ID was not found in the dataset\n continue\n\n if meta_info is None:\n # Fail here\n raise RuntimeError(\"[%s] Meta-data for spectrum not found.\" % spec_id)\n # ==================================\n\n # =================================================================================\n # Load candidates and with CSI:FingerID scores and combine with PubChem information\n cands = pd.read_csv(score_archive.extractfile(entry), sep=\"\\t\") # type: pd.DataFrame\n LOGGER.info(\"[%s] Number of candidates (SIRIUS): %d\" % (spec_id, len(cands)))\n\n # Here we add the stereo-isomers for each 2D structure\n cands = pd.merge(\n left=cands,\n right=pd.read_sql(\n \"SELECT * FROM compounds \"\n \" WHERE InChIKey_1 IN %s\"\n \" AND molecular_formula == '%s'\"\n % (MassbankDB._in_sql(cands[\"inchikey\"]), meta_info[\"molecular_formula\"]),\n conn_pc\n ),\n left_on=\"inchikey\", right_on=\"InChIKey_1\", how=\"inner\", suffixes=(\"__SIRIUS\", \"__PUBCHEM\")\n )\n LOGGER.info(\"[%s] Number of candidates (with PubChem match): %d\" % (spec_id, len(cands)))\n\n # --------------------------------------------------------------\n # Filter candidates: Implemented as done in the MetFrag software\n is_not_isotopic = cands[\"InChI\"].apply(lambda x: \"/i\" not in x) # isotopic\n\n is_connected_mf = cands[\"molecular_formula\"].apply(\n lambda x: (\".\" not in x) and (DISCONNECTED_PATTER.match(x) is None)) # disconnected\n\n is_connected_smiles = cands[\"SMILES_ISO\"].apply(lambda x: \".\" not in x)\n\n cands = cands[is_not_isotopic & is_connected_mf & is_connected_smiles]\n\n LOGGER.info(\"[%s] Filtering removed %d candidates.\" %\n (spec_id, (~ (is_not_isotopic & is_connected_mf & is_connected_smiles)).sum()))\n # --------------------------------------------------------------\n\n if meta_info[\"pubchem_id\"] not in cands[\"cid\"].to_list():\n LOGGER.error(\"[%s] Correct molecular structure (cid = %d) is not in the candidate set.\"\n % (spec_id, meta_info[\"pubchem_id\"]))\n continue\n\n _unq_mf = cands[\"molecular_formula\"].unique()\n if meta_info[\"molecular_formula\"] not in _unq_mf:\n LOGGER.error(\"[{}] Correct molecular formula is not in the candidate set: {} not in {}.\"\n .format(spec_id, meta_info[\"molecular_formula\"], _unq_mf))\n continue\n\n if len(_unq_mf) > 1:\n LOGGER.warning(\"[{}] There is more than one molecular formula in the candidate set: {}.\"\n .format(spec_id, _unq_mf))\n # =================================================================================\n\n # ===========================\n # Insert new data into the DB\n with conn_mb:\n # Molecule structures associated with the candidates\n conn_mb.executemany(\"INSERT OR IGNORE INTO molecules VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n [\n (row[\"cid\"], row[\"InChI\"], row[\"InChIKey\"],\n row[\"InChIKey_1\"], row[\"InChIKey_2\"],\n row[\"SMILES_ISO\"], row[\"SMILES_CAN\"],\n row[\"exact_mass\"], row[\"monoisotopic_mass\"],\n row[\"molecular_formula\"], row[\"xlogp3\"])\n for _, row in cands.iterrows()\n ])\n\n # Meta-information about the scored spectra\n conn_mb.execute(\"INSERT OR IGNORE INTO scored_spectra_meta VALUES (?, ?, ?, ?, ?, ?, ?)\",\n (\n meta_info[\"accession\"], meta_info[\"dataset\"],\n meta_info[\"original_accessions\"], meta_info[\"precursor_mz\"],\n meta_info[\"precursor_type\"], meta_info[\"pubchem_id\"],\n meta_info[\"retention_time\"]\n ))\n\n # SIRIUS serves as basis for the candidate sets\n conn_mb.executemany(\"INSERT INTO candidates_spectra VALUES (?, ?)\",\n [\n 
(meta_info[\"accession\"], row[\"cid\"]) for _, row in cands.iterrows()\n ])\n\n # CSI:FingerID candidate scores (normalize first)\n\n # Make all scores >= 0 by adding the absolute value for the smallest score\n cands[\"score\"] += np.abs(np.min(cands[\"score\"]))\n\n # Make maximum score being 1\n _max_score = np.max(cands[\"score\"])\n if _max_score == 0:\n LOGGER.warning(\"[%s] Max score zero ?\" % spec_id)\n LOGGER.warning(cands[\"score\"])\n\n cands[\"score\"] = np.ones_like(cands[\"score\"])\n else:\n cands[\"score\"] /= _max_score\n\n conn_mb.executemany(\n \"INSERT INTO spectra_candidate_scores VALUES (?, ?, ?, ?, ?)\",\n [\n (\n meta_info[\"accession\"],\n row[\"cid\"],\n \"sirius__norm\",\n meta_info[\"dataset\"],\n row[\"score\"]\n )\n for _, row in cands.iterrows()\n ]\n )\n\n # Insert information about the merged Massbank accessions and their new ids\n conn_mb.executemany(\"INSERT INTO merged_accessions VALUES (?, ?)\",\n [\n (acc, meta_info[\"accession\"])\n for acc in meta_info[\"original_accessions\"].split(\",\")\n ])\n\n # CSI:FingerID fingerprints as index strings\n if args.include_sirius_fps:\n conn_mb.executemany(\n \"INSERT OR IGNORE INTO fingerprints_data__sirius_fps(molecule, bits) VALUES (?, ?)\",\n [\n (\n row[\"cid\"],\n \",\".join(map(str, [idx for idx, fp in enumerate(row[\"fingerprint\"]) if fp == \"1\"]))\n )\n for _, row in cands.iterrows()\n ]\n )\n # ===========================\n\n with conn_mb:\n create_indices(conn_mb)\n\n except RuntimeError as err:\n traceback.print_exc()\n LOGGER.error(err)\n finally:\n conn_mb.close()\n conn_pc.close()\n\n" ]
[ [ "numpy.max", "numpy.random.RandomState", "numpy.ones_like", "numpy.min" ] ]
lpworld/LatentUnexp
[ "55cfa214733d6fd31f8113b8ae47f83e03984f0d" ]
[ "surprise/similarities.py" ]
[ "\"\"\"\nThe :mod:`similarities <surprise.similarities>` module includes tools to\ncompute similarity metrics between users or items. You may need to refer to the\n:ref:`notation_standards` page. See also the\n:ref:`similarity_measures_configuration` section of the User Guide.\n\nAvailable similarity measures:\n\n.. autosummary::\n :nosignatures:\n\n cosine\n msd\n pearson\n pearson_baseline\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\n\nfrom six.moves import range\nfrom six import iteritems\n\n\ndef cosine(n_x, yr, min_support):\n \"\"\"Compute the cosine similarity between all pairs of users (or items).\n\n Only **common** users (or items) are taken into account. The cosine\n similarity is defined as:\n\n .. math::\n \\\\text{cosine_sim}(u, v) = \\\\frac{\n \\\\sum\\\\limits_{i \\in I_{uv}} r_{ui} \\cdot r_{vi}}\n {\\\\sqrt{\\\\sum\\\\limits_{i \\in I_{uv}} r_{ui}^2} \\cdot\n \\\\sqrt{\\\\sum\\\\limits_{i \\in I_{uv}} r_{vi}^2}\n }\n\n or\n\n .. math::\n \\\\text{cosine_sim}(i, j) = \\\\frac{\n \\\\sum\\\\limits_{u \\in U_{ij}} r_{ui} \\cdot r_{uj}}\n {\\\\sqrt{\\\\sum\\\\limits_{u \\in U_{ij}} r_{ui}^2} \\cdot\n \\\\sqrt{\\\\sum\\\\limits_{u \\in U_{ij}} r_{uj}^2}\n }\n\n depending on the ``user_based`` field of ``sim_options`` (see\n :ref:`similarity_measures_configuration`).\n\n For details on cosine similarity, see on `Wikipedia\n <https://en.wikipedia.org/wiki/Cosine_similarity#Definition>`__.\n \"\"\"\n\n\n min_sprt = min_support\n prods = np.zeros((n_x, n_x), np.double)\n freq = np.zeros((n_x, n_x), np.int)\n sqi = np.zeros((n_x, n_x), np.double)\n sqj = np.zeros((n_x, n_x), np.double)\n sim = np.zeros((n_x, n_x), np.double)\n\n for y, y_ratings in iteritems(yr):\n for xi, ri in y_ratings:\n for xj, rj in y_ratings:\n freq[xi, xj] += 1\n prods[xi, xj] += ri * rj\n sqi[xi, xj] += ri**2\n sqj[xi, xj] += rj**2\n\n for xi in range(n_x):\n sim[xi, xi] = 1\n for xj in range(xi + 1, n_x):\n if freq[xi, xj] < min_sprt:\n sim[xi, xj] = 0\n else:\n denum = np.sqrt(sqi[xi, xj] * sqj[xi, xj])\n sim[xi, xj] = prods[xi, xj] / denum\n\n sim[xj, xi] = sim[xi, xj]\n\n return sim\n\n\ndef msd(n_x, yr, min_support):\n \"\"\"Compute the Mean Squared Difference similarity between all pairs of\n users (or items).\n\n Only **common** users (or items) are taken into account. The Mean Squared\n Difference is defined as:\n\n .. math ::\n \\\\text{msd}(u, v) = \\\\frac{1}{|I_{uv}|} \\cdot\n \\\\sum\\\\limits_{i \\in I_{uv}} (r_{ui} - r_{vi})^2\n\n or\n\n .. math ::\n \\\\text{msd}(i, j) = \\\\frac{1}{|U_{ij}|} \\cdot\n \\\\sum\\\\limits_{u \\in U_{ij}} (r_{ui} - r_{uj})^2\n\n depending on the ``user_based`` field of ``sim_options`` (see\n :ref:`similarity_measures_configuration`).\n\n The MSD-similarity is then defined as:\n\n .. 
math ::\n \\\\text{msd_sim}(u, v) &= \\\\frac{1}{\\\\text{msd}(u, v) + 1}\\\\\\\\\n \\\\text{msd_sim}(i, j) &= \\\\frac{1}{\\\\text{msd}(i, j) + 1}\n\n The :math:`+ 1` term is just here to avoid dividing by zero.\n\n\n For details on MSD, see third definition on `Wikipedia\n <https://en.wikipedia.org/wiki/Root-mean-square_deviation#Formula>`__.\n\n \"\"\"\n\n\n min_sprt = min_support\n sq_diff = np.zeros((n_x, n_x), np.double)\n freq = np.zeros((n_x, n_x), np.int)\n sim = np.zeros((n_x, n_x), np.double)\n\n for y, y_ratings in iteritems(yr):\n for xi, ri in y_ratings:\n for xj, rj in y_ratings:\n sq_diff[xi, xj] += (ri - rj)**2\n freq[xi, xj] += 1\n\n for xi in range(n_x):\n sim[xi, xi] = 1 # completely arbitrary and useless anyway\n for xj in range(xi + 1, n_x):\n if freq[xi, xj] < min_sprt:\n sim[xi, xj] == 0\n else:\n # return inverse of (msd + 1) (+ 1 to avoid dividing by zero)\n sim[xi, xj] = 1 / (sq_diff[xi, xj] / freq[xi, xj] + 1)\n\n sim[xj, xi] = sim[xi, xj]\n\n return sim\n\n\ndef pearson(n_x, yr, min_support):\n \"\"\"Compute the Pearson correlation coefficient between all pairs of users\n (or items).\n\n Only **common** users (or items) are taken into account. The Pearson\n correlation coefficient can be seen as a mean-centered cosine similarity,\n and is defined as:\n\n .. math ::\n \\\\text{pearson_sim}(u, v) = \\\\frac{ \\\\sum\\\\limits_{i \\in I_{uv}}\n (r_{ui} - \\mu_u) \\cdot (r_{vi} - \\mu_{v})} {\\\\sqrt{\\\\sum\\\\limits_{i\n \\in I_{uv}} (r_{ui} - \\mu_u)^2} \\cdot \\\\sqrt{\\\\sum\\\\limits_{i \\in\n I_{uv}} (r_{vi} - \\mu_{v})^2} }\n\n or\n\n .. math ::\n \\\\text{pearson_sim}(i, j) = \\\\frac{ \\\\sum\\\\limits_{u \\in U_{ij}}\n (r_{ui} - \\mu_i) \\cdot (r_{uj} - \\mu_{j})} {\\\\sqrt{\\\\sum\\\\limits_{u\n \\in U_{ij}} (r_{ui} - \\mu_i)^2} \\cdot \\\\sqrt{\\\\sum\\\\limits_{u \\in\n U_{ij}} (r_{uj} - \\mu_{j})^2} }\n\n depending on the ``user_based`` field of ``sim_options`` (see\n :ref:`similarity_measures_configuration`).\n\n\n Note: if there are no common users or items, similarity will be 0 (and not\n -1).\n\n For details on Pearson coefficient, see `Wikipedia\n <https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample>`__.\n\n \"\"\"\n\n min_sprt = min_support\n freq = np.zeros((n_x, n_x), np.int)\n prods = np.zeros((n_x, n_x), np.double)\n sqi = np.zeros((n_x, n_x), np.double)\n sqj = np.zeros((n_x, n_x), np.double)\n si = np.zeros((n_x, n_x), np.double)\n sj = np.zeros((n_x, n_x), np.double)\n sim = np.zeros((n_x, n_x), np.double)\n\n for y, y_ratings in iteritems(yr):\n for xi, ri in y_ratings:\n for xj, rj in y_ratings:\n prods[xi, xj] += ri * rj\n freq[xi, xj] += 1\n sqi[xi, xj] += ri**2\n sqj[xi, xj] += rj**2\n si[xi, xj] += ri\n sj[xi, xj] += rj\n\n for xi in range(n_x):\n sim[xi, xi] = 1\n for xj in range(xi + 1, n_x):\n\n if freq[xi, xj] < min_sprt:\n sim[xi, xj] == 0\n else:\n n = freq[xi, xj]\n num = n * prods[xi, xj] - si[xi, xj] * sj[xi, xj]\n denum = np.sqrt((n * sqi[xi, xj] - si[xi, xj]**2) *\n (n * sqj[xi, xj] - sj[xi, xj]**2))\n if denum == 0:\n sim[xi, xj] = 0\n else:\n sim[xi, xj] = num / denum\n\n sim[xj, xi] = sim[xi, xj]\n\n return sim\n\n\ndef pearson_baseline(n_x, yr, min_support, global_mean, x_biases, y_biases,\n shrinkage=100):\n \"\"\"Compute the (shrunk) Pearson correlation coefficient between all pairs\n of users (or items) using baselines for centering instead of means.\n\n The shrinkage parameter helps to avoid overfitting when only few ratings\n are available (see 
:ref:`similarity_measures_configuration`).\n\n The Pearson-baseline correlation coefficient is defined as:\n\n .. math::\n \\\\text{pearson_baseline_sim}(u, v) = \\hat{\\\\rho}_{uv} = \\\\frac{\n \\\\sum\\\\limits_{i \\in I_{uv}} (r_{ui} - b_{ui}) \\cdot (r_{vi} -\n b_{vi})} {\\\\sqrt{\\\\sum\\\\limits_{i \\in I_{uv}} (r_{ui} - b_{ui})^2}\n \\cdot \\\\sqrt{\\\\sum\\\\limits_{i \\in I_{uv}} (r_{vi} - b_{vi})^2}}\n\n or\n\n .. math::\n \\\\text{pearson_baseline_sim}(i, j) = \\hat{\\\\rho}_{ij} = \\\\frac{\n \\\\sum\\\\limits_{u \\in U_{ij}} (r_{ui} - b_{ui}) \\cdot (r_{uj} -\n b_{uj})} {\\\\sqrt{\\\\sum\\\\limits_{u \\in U_{ij}} (r_{ui} - b_{ui})^2}\n \\cdot \\\\sqrt{\\\\sum\\\\limits_{u \\in U_{ij}} (r_{uj} - b_{uj})^2}}\n\n The shrunk Pearson-baseline correlation coefficient is then defined as:\n\n .. math::\n \\\\text{pearson_baseline_shrunk_sim}(u, v) &= \\\\frac{|I_{uv}| - 1}\n {|I_{uv}| - 1 + \\\\text{shrinkage}} \\\\cdot \\hat{\\\\rho}_{uv}\n\n \\\\text{pearson_baseline_shrunk_sim}(i, j) &= \\\\frac{|U_{ij}| - 1}\n {|U_{ij}| - 1 + \\\\text{shrinkage}} \\\\cdot \\hat{\\\\rho}_{ij}\n\n\n Obviously, a shrinkage parameter of 0 amounts to no shrinkage at all.\n\n Note: here again, if there are no common users/items, similarity will be 0\n (and not -1).\n\n Motivations for such a similarity measure can be found on the *Recommender\n System Handbook*, section 5.4.1.\n \"\"\"\n\n\n min_sprt = min_support\n global_mean_ = global_mean\n freq = np.zeros((n_x, n_x), np.int)\n prods = np.zeros((n_x, n_x), np.double)\n sq_diff_i = np.zeros((n_x, n_x), np.double)\n sq_diff_j = np.zeros((n_x, n_x), np.double)\n sim = np.zeros((n_x, n_x), np.double)\n\n x_biases_ = x_biases\n y_biases_ = y_biases\n\n # Need this because of shrinkage. When pearson coeff is zero when support\n # is 1, so that's OK.\n min_sprt = max(2, min_sprt)\n\n for y, y_ratings in iteritems(yr):\n partial_bias = global_mean_ + y_biases_[y]\n for xi, ri in y_ratings:\n for xj, rj in y_ratings:\n freq[xi, xj] += 1\n diff_i = (ri - (partial_bias + x_biases_[xi]))\n diff_j = (rj - (partial_bias + x_biases_[xj]))\n prods[xi, xj] += diff_i * diff_j\n sq_diff_i[xi, xj] += diff_i**2\n sq_diff_j[xi, xj] += diff_j**2\n\n for xi in range(n_x):\n sim[xi, xi] = 1\n for xj in range(xi + 1, n_x):\n if freq[xi, xj] < min_sprt:\n sim[xi, xj] = 0\n else:\n sim[xi, xj] = prods[xi, xj] / (np.sqrt(sq_diff_i[xi, xj] *\n sq_diff_j[xi, xj]))\n # the shrinkage part\n sim[xi, xj] *= (freq[xi, xj] - 1) / (freq[xi, xj] - 1 +\n shrinkage)\n\n sim[xj, xi] = sim[xi, xj]\n\n return sim\n" ]
[ [ "numpy.zeros", "numpy.sqrt" ] ]
glynpu/lhotse
[ "1d7807025575fdaa96cb907c451db0fb0fd23cde" ]
[ "test/cut/test_cut_mixing.py" ]
[ "from math import isclose\n\nimport numpy as np\nimport pytest\n\nfrom lhotse.cut import CutSet, MixedCut, MonoCut\nfrom lhotse.supervision import SupervisionSegment\nfrom lhotse.testing.dummies import remove_spaces_from_segment_text\nfrom lhotse.utils import nullcontext as does_not_raise\n\n\n# Note:\n# Definitions for `cut1`, `cut2` and `cut_set` parameters are standard Pytest fixtures located in test/cut/conftest.py\n\n\ndef test_append_cut_duration_and_supervisions(cut1, cut2):\n appended_cut = cut1.append(cut2)\n\n assert isinstance(appended_cut, MixedCut)\n assert appended_cut.duration == 20.0\n assert appended_cut.supervisions == [\n SupervisionSegment(\n id=\"sup-1\", recording_id=\"irrelevant\", start=0.5, duration=6.0\n ),\n SupervisionSegment(\n id=\"sup-2\", recording_id=\"irrelevant\", start=7.0, duration=2.0\n ),\n SupervisionSegment(\n id=\"sup-3\", recording_id=\"irrelevant\", start=13.0, duration=2.5\n ),\n ]\n\n\[email protected](\n [\"offset\", \"allow_padding\", \"expected_duration\", \"exception_expectation\"],\n [\n (0, False, 10.0, does_not_raise()),\n (1, False, 11.0, does_not_raise()),\n (5, False, 15.0, does_not_raise()),\n (10, False, 20.0, does_not_raise()),\n (100, False, \"irrelevant\", pytest.raises(AssertionError)),\n (100, True, 110.0, does_not_raise()),\n ],\n)\ndef test_overlay_cut_duration_and_supervisions(\n offset, allow_padding, expected_duration, exception_expectation, cut1, cut2\n):\n with exception_expectation:\n mixed_cut = cut1.mix(cut2, offset_other_by=offset, allow_padding=allow_padding)\n\n assert isinstance(mixed_cut, MixedCut)\n assert mixed_cut.duration == expected_duration\n assert mixed_cut.supervisions == [\n SupervisionSegment(\n id=\"sup-1\", recording_id=\"irrelevant\", start=0.5, duration=6.0\n ),\n SupervisionSegment(\n id=\"sup-2\", recording_id=\"irrelevant\", start=7.0, duration=2.0\n ),\n SupervisionSegment(\n id=\"sup-3\", recording_id=\"irrelevant\", start=3.0 + offset, duration=2.5\n ),\n ]\n\n\[email protected]\ndef mixed_feature_cut() -> MixedCut:\n cut_set = CutSet.from_json(\"test/fixtures/mix_cut_test/overlayed_cut_manifest.json\")\n mixed_cut = cut_set[\"mixed-cut-id\"]\n assert mixed_cut.num_frames == 1360\n assert isclose(mixed_cut.duration, 13.595)\n return mixed_cut\n\n\ndef test_mixed_cut_load_features_mixed(mixed_feature_cut):\n feats = mixed_feature_cut.load_features()\n assert feats.shape[0] == 1360\n\n\ndef test_mixed_cut_load_features_unmixed(mixed_feature_cut):\n feats = mixed_feature_cut.load_features(mixed=False)\n assert feats.shape[0] == 2\n assert feats.shape[1] == 1360\n\n\ndef test_mixed_cut_map_supervisions(mixed_feature_cut):\n for s in mixed_feature_cut.map_supervisions(\n remove_spaces_from_segment_text\n ).supervisions:\n if s.text is not None:\n assert \" \" not in s.text\n\n\[email protected]\ndef mixed_audio_cut() -> MixedCut:\n cut_set = CutSet.from_json(\n \"test/fixtures/mix_cut_test/overlayed_audio_cut_manifest.json\"\n )\n mixed_cut = cut_set[\"mixed-cut-id\"]\n assert isclose(mixed_cut.duration, 14.4)\n return mixed_cut\n\n\ndef test_mixed_cut_load_audio_mixed(mixed_audio_cut):\n audio = mixed_audio_cut.load_audio()\n assert audio.shape == (1, 230400)\n\n\ndef test_mixed_cut_load_audio_unmixed(mixed_audio_cut):\n audio = mixed_audio_cut.load_audio(mixed=False)\n assert audio.shape == (2, 230400)\n\n\[email protected]\ndef libri_cut_set():\n return CutSet.from_json(\"test/fixtures/libri/cuts.json\")\n\n\[email protected]\ndef libri_cut(libri_cut_set) -> MonoCut:\n return 
libri_cut_set[\"e3e70682-c209-4cac-629f-6fbed82c07cd\"]\n\n\ndef E(x):\n if x.shape[0] == 1:\n # audio\n return np.sum(x**2)\n # fbank\n return np.sum(np.exp(x))\n\n\ndef test_mix_cut_snr(libri_cut):\n mixed = libri_cut.pad(duration=20).mix(libri_cut, offset_other_by=10)\n mixed_snr = libri_cut.pad(duration=20).mix(libri_cut, offset_other_by=10, snr=10)\n\n assert len(mixed.tracks) == 3\n assert len(mixed_snr.tracks) == 3\n\n audio = mixed.load_audio()\n audio_snr = mixed_snr.load_audio()\n feats = mixed.load_features()\n feats_snr = mixed_snr.load_features()\n\n for item in (audio, audio_snr, feats, feats_snr):\n assert E(item) > 0\n\n # Cuts mixed without SNR specified should have a higher energy in feature and audio domains.\n assert E(audio) > E(audio_snr)\n assert E(feats) > E(feats_snr)\n\n\ndef test_mix_cut_snr_truncate_snr_reference(libri_cut):\n mixed = libri_cut.pad(duration=20).mix(libri_cut, offset_other_by=10)\n mixed_snr = libri_cut.pad(duration=20).mix(libri_cut, offset_other_by=10, snr=10)\n\n # truncate enough to remove the first cut\n mixed = mixed.truncate(offset=18)\n mixed_snr = mixed_snr.truncate(offset=18)\n\n assert len(mixed.tracks) == 2\n assert len(mixed_snr.tracks) == 2\n\n audio = mixed.load_audio()\n audio_snr = mixed_snr.load_audio()\n feats = mixed.load_features()\n feats_snr = mixed_snr.load_features()\n\n for item in (audio, audio_snr, feats, feats_snr):\n assert E(item) > 0\n\n # Both cuts with have identical energies, as the SNR reference was removed in `mixed_snr`,\n # and the only remaining non-padding cut is the one that was mixed in.\n assert E(audio) == E(audio_snr)\n assert E(feats) == E(feats_snr)\n\n\ndef test_mix_cut_snr_pad_both(libri_cut):\n # Pad from both sides, then mix in some \"noise\" at the beginning.\n # The SNR should refer to the original cut, and not to the mixed in noise.\n padded = libri_cut.pad(duration=20, direction=\"both\")\n mixed = padded.mix(libri_cut)\n mixed_snr = padded.mix(libri_cut, snr=10)\n\n assert isinstance(padded, MixedCut)\n assert len(padded.tracks) == 3\n assert len(mixed.tracks) == 4\n assert len(mixed_snr.tracks) == 4\n\n audio = padded.load_audio()\n audio_nosnr = mixed.load_audio()\n audio_snr = mixed_snr.load_audio()\n feats = padded.load_features()\n feats_nosnr = mixed.load_features()\n feats_snr = mixed_snr.load_features()\n\n for item in (audio, audio_nosnr, audio_snr, feats, feats_nosnr, feats_snr):\n assert E(item) > 0\n\n # Cuts mixed without SNR specified should have a higher energy in feature and audio domains.\n # Note: if any of those are equal, it means some operation had no effect\n # (a bug this test is preventing against).\n assert E(audio_snr) > E(audio)\n assert E(audio_nosnr) > E(audio)\n assert E(audio_nosnr) > E(audio_snr)\n assert E(feats_snr) > E(feats)\n assert E(feats_nosnr) > E(feats)\n assert E(feats_nosnr) > E(feats_snr)\n" ]
[ [ "numpy.exp", "numpy.sum" ] ]
lbeltrame/bcbio-nextgen
[ "1135176df8cb6a47ae39f997ffa4eaac17f8b4ff" ]
[ "bcbio/ngsalign/tophat.py" ]
[ "\"\"\"Next-gen alignments with TopHat a spliced read mapper for RNA-seq experiments.\n\nhttp://tophat.cbcb.umd.edu\n\"\"\"\nimport os\nimport shutil\nimport glob\nimport subprocess\n\nimport numpy\nimport pysam\n\nfrom bcbio.pipeline import config_utils\nfrom bcbio.ngsalign import bowtie, bowtie2\nfrom bcbio.utils import safe_makedir, file_exists, get_in, symlink_plus\nfrom bcbio.distributed.transaction import file_transaction\nfrom bcbio.provenance import do\nfrom bcbio import bam, broad, utils\nimport bcbio.pipeline.datadict as dd\n\n\ndef _set_quality_flag(options, data):\n qual_format = dd.get_quality_format(data)\n if qual_format.lower() == \"illumina\":\n options[\"solexa1.3-quals\"] = True\n elif qual_format.lower() == \"solexa\":\n options[\"solexa-quals\"] = True\n return options\n\ndef _set_transcriptome_option(options, data, ref_file):\n # prefer transcriptome-index vs a GTF file if available\n transcriptome_index = get_in(data, (\"genome_resources\", \"rnaseq\",\n \"transcriptome_index\", \"tophat\"))\n fusion_mode = _should_run_fusion(data)\n if transcriptome_index and file_exists(transcriptome_index) and not fusion_mode:\n options[\"transcriptome-index\"] = os.path.splitext(transcriptome_index)[0]\n return options\n\n gtf_file = dd.get_gtf_file(data)\n if gtf_file:\n options[\"GTF\"] = gtf_file\n return options\n\n return options\n\ndef _set_cores(options, config):\n num_cores = config[\"algorithm\"].get(\"num_cores\", 0)\n if num_cores > 1 and \"num-threads\" not in options:\n options[\"num-threads\"] = num_cores\n return options\n\ndef _set_rg_options(options, names):\n if not names:\n return options\n options[\"rg-id\"] = names[\"rg\"]\n options[\"rg-sample\"] = names[\"sample\"]\n options[\"rg-library\"] = names[\"lb\"] or names[\"pl\"]\n options[\"rg-platform-unit\"] = names[\"pu\"]\n return options\n\ndef _set_stranded_flag(options, config):\n strand_flag = {\"unstranded\": \"fr-unstranded\",\n \"firststrand\": \"fr-firststrand\",\n \"secondstrand\": \"fr-secondstrand\",\n \"auto\": \"fr-unstranded\"}\n stranded = get_in(config, (\"algorithm\", \"strandedness\"), \"unstranded\").lower()\n assert stranded in strand_flag, (\"%s is not a valid strandedness value. 
\"\n \"Valid values are 'firststrand', \"\n \"'secondstrand' and 'unstranded\" % (stranded))\n flag = strand_flag[stranded]\n options[\"library-type\"] = flag\n return options\n\ndef _set_fusion_mode(options, data):\n if _should_run_fusion(data):\n options[\"fusion-search\"] = True\n return options\n\ndef tophat_align(fastq_file, pair_file, ref_file, out_base, align_dir, data,\n names=None):\n \"\"\"\n run alignment using Tophat v2\n \"\"\"\n config = data[\"config\"]\n options = get_in(config, (\"resources\", \"tophat\", \"options\"), {})\n options = _set_fusion_mode(options, data)\n options = _set_quality_flag(options, data)\n options = _set_transcriptome_option(options, data, ref_file)\n options = _set_cores(options, config)\n options = _set_rg_options(options, names)\n options = _set_stranded_flag(options, config)\n\n ref_file, runner = _determine_aligner_and_reference(ref_file, data)\n\n # fusion search does not work properly with Bowtie2\n if options.get(\"fusion-search\", False):\n ref_file = ref_file.replace(\"/bowtie2\", \"/bowtie\")\n\n if _tophat_major_version(config) == 1:\n raise NotImplementedError(\"Tophat versions < 2.0 are not supported, please \"\n \"download the newest version of Tophat here: \"\n \"http://tophat.cbcb.umd.edu\")\n\n if _ref_version(ref_file) == 1 or options.get(\"fusion-search\", False):\n options[\"bowtie1\"] = True\n\n out_dir = os.path.join(align_dir, \"%s_tophat\" % out_base)\n final_out = os.path.join(out_dir, \"{0}.bam\".format(names[\"sample\"]))\n if file_exists(final_out):\n return final_out\n\n out_file = os.path.join(out_dir, \"accepted_hits.bam\")\n unmapped = os.path.join(out_dir, \"unmapped.bam\")\n files = [ref_file, fastq_file]\n if not file_exists(out_file):\n with file_transaction(config, out_dir) as tx_out_dir:\n safe_makedir(tx_out_dir)\n if pair_file and not options.get(\"mate-inner-dist\", None):\n d, d_stdev = _estimate_paired_innerdist(fastq_file, pair_file,\n ref_file, out_base,\n tx_out_dir, data)\n options[\"mate-inner-dist\"] = d\n options[\"mate-std-dev\"] = d_stdev\n files.append(pair_file)\n options[\"output-dir\"] = tx_out_dir\n options[\"no-coverage-search\"] = True\n options[\"no-mixed\"] = True\n cmd = [utils.get_program_python(\"tophat\"), config_utils.get_program(\"tophat\", config)]\n for k, v in options.items():\n if v is True:\n cmd.append(\"--%s\" % k)\n else:\n assert not isinstance(v, bool)\n cmd.append(\"--%s=%s\" % (k, v))\n # tophat requires options before arguments, otherwise it silently ignores them\n cmd += files\n do.run(cmd, \"Running Tophat on %s and %s.\" % (fastq_file, pair_file))\n if pair_file and _has_alignments(out_file):\n fixed = _fix_mates(out_file, os.path.join(out_dir, \"%s-align.bam\" % out_base),\n ref_file, config)\n else:\n fixed = out_file\n fixed_unmapped = _fix_unmapped(fixed, unmapped, data)\n fixed = merge_unmapped(fixed, fixed_unmapped, config)\n fixed = _add_rg(fixed, config, names)\n fixed = bam.sort(fixed, config)\n picard = broad.runner_from_path(\"picard\", config)\n # set the contig order to match the reference file so GATK works\n fixed = picard.run_fn(\"picard_reorder\", fixed, data[\"sam_ref\"],\n os.path.splitext(fixed)[0] + \".picard.bam\")\n fixed = fix_insert_size(fixed, config)\n if not file_exists(final_out):\n symlink_plus(fixed, final_out)\n return final_out\n\ndef merge_unmapped(bam_file, unmapped_bam, config):\n merged_bam = os.path.join(os.path.dirname(bam_file), \"merged.bam\")\n if not file_exists(merged_bam):\n merged_bam = bam.merge([bam_file, unmapped_bam], 
merged_bam, config)\n return merged_bam\n\ndef _has_alignments(sam_file):\n with open(sam_file) as in_handle:\n try:\n for line in in_handle:\n if line.startswith(\"File removed to save disk space\"):\n return False\n elif not line.startswith(\"@\"):\n return True\n except UnicodeDecodeError:\n return not bam.is_empty(sam_file)\n return False\n\ndef _fix_mates(orig_file, out_file, ref_file, config):\n \"\"\"Fix problematic unmapped mate pairs in TopHat output.\n\n TopHat 2.0.9 appears to have issues with secondary reads:\n https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo\n This cleans the input file to only keep properly mapped pairs,\n providing a general fix that will handle correctly mapped secondary\n reads as well.\n \"\"\"\n if not file_exists(out_file):\n with file_transaction(config, out_file) as tx_out_file:\n samtools = config_utils.get_program(\"samtools\", config)\n cmd = \"{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}\"\n do.run(cmd.format(**locals()), \"Fix mate pairs in TopHat output\", {})\n return out_file\n\ndef _add_rg(unmapped_file, config, names):\n \"\"\"Add the missing RG header.\"\"\"\n picard = broad.runner_from_path(\"picard\", config)\n rg_fixed = picard.run_fn(\"picard_fix_rgs\", unmapped_file, names)\n return rg_fixed\n\n\ndef _fix_unmapped(mapped_file, unmapped_file, data):\n \"\"\"\n The unmapped.bam file up until at least Tophat 2.1.1 is broken in various\n ways, see https://github.com/cbrueffer/tophat-recondition for details.\n Run TopHat-Recondition to fix these issues.\n \"\"\"\n out_file = os.path.splitext(unmapped_file)[0] + \"_fixup.bam\"\n if file_exists(out_file):\n return out_file\n\n assert os.path.dirname(mapped_file) == os.path.dirname(unmapped_file)\n\n cmd = config_utils.get_program(\"tophat-recondition\", data)\n cmd += \" -q\"\n tophat_out_dir = os.path.dirname(mapped_file)\n tophat_logfile = os.path.join(tophat_out_dir, 'tophat-recondition.log')\n\n with file_transaction(data, tophat_logfile) as tx_logfile:\n cmd += ' --logfile %s' % tx_logfile\n cmd += \" -m %s\" % mapped_file\n cmd += \" -u %s\" % unmapped_file\n cmd += \" %s\" % tophat_out_dir\n do.run(cmd, \"Fixing unmapped reads with Tophat-Recondition.\", None)\n\n return out_file\n\n\ndef align(fastq_file, pair_file, ref_file, names, align_dir, data,):\n out_files = tophat_align(fastq_file, pair_file, ref_file, names[\"lane\"],\n align_dir, data, names)\n\n return out_files\n\n\ndef _estimate_paired_innerdist(fastq_file, pair_file, ref_file, out_base,\n out_dir, data):\n \"\"\"Use Bowtie to estimate the inner distance of paired reads.\n \"\"\"\n mean, stdev = _bowtie_for_innerdist(\"100000\", fastq_file, pair_file, ref_file,\n out_base, out_dir, data, True)\n if not mean or not stdev:\n mean, stdev = _bowtie_for_innerdist(\"1\", fastq_file, pair_file, ref_file,\n out_base, out_dir, data, True)\n # No reads aligning so no data to process, set some default values\n if not mean or not stdev:\n mean, stdev = 200, 50\n\n return mean, stdev\n\n\ndef _bowtie_for_innerdist(start, fastq_file, pair_file, ref_file, out_base,\n out_dir, data, remove_workdir=False):\n work_dir = os.path.join(out_dir, \"innerdist_estimate\")\n if os.path.exists(work_dir):\n shutil.rmtree(work_dir)\n safe_makedir(work_dir)\n extra_args = [\"-s\", str(start), \"-u\", \"250000\"]\n ref_file, bowtie_runner = _determine_aligner_and_reference(ref_file, data)\n out_sam = bowtie_runner.align(fastq_file, pair_file, ref_file, {\"lane\": out_base},\n work_dir, data, 
extra_args)\n dists = []\n with pysam.Samfile(out_sam) as work_sam:\n for read in work_sam:\n if read.is_proper_pair and read.is_read1:\n dists.append(abs(read.isize) - 2 * read.rlen)\n if dists:\n median = float(numpy.median(dists))\n deviations = []\n for d in dists:\n deviations.append(abs(d - median))\n # this is the median absolute deviation estimator of the\n # standard deviation\n mad = 1.4826 * float(numpy.median(deviations))\n return int(median), int(mad)\n else:\n return None, None\n\ndef _calculate_average_read_length(sam_file):\n with pysam.Samfile(sam_file) as work_sam:\n count = 0\n read_lengths = []\n for read in work_sam:\n count = count + 1\n read_lengths.append(read.rlen)\n avg_read_length = int(float(sum(read_lengths)) / float(count))\n return avg_read_length\n\n\ndef _bowtie_major_version(stdout):\n \"\"\"\n bowtie --version returns strings like this:\n bowtie version 0.12.7\n 32-bit\n Built on Franklin.local\n Tue Sep 7 14:25:02 PDT 2010\n \"\"\"\n version_line = stdout.split(\"\\n\")[0]\n version_string = version_line.strip().split()[2]\n major_version = int(version_string.split(\".\")[0])\n # bowtie version 1 has a leading character of 0 or 1\n if major_version == 0 or major_version == 1:\n major_version = 1\n return major_version\n\n\ndef _should_run_fusion(data):\n return dd.get_fusion_caller(data)\n\ndef _determine_aligner_and_reference(ref_file, data):\n fusion_mode = _should_run_fusion(data)\n # fusion_mode only works with bowtie1\n if fusion_mode:\n return _get_bowtie_with_reference(ref_file, 1)\n else:\n return _get_bowtie_with_reference(ref_file, 2)\n\ndef _get_bowtie_with_reference(ref_file, version):\n if version == 1:\n ref_file = ref_file.replace(\"/bowtie2/\", \"/bowtie/\")\n return ref_file, bowtie\n else:\n ref_file = ref_file.replace(\"/bowtie/\", \"/bowtie2/\")\n return ref_file, bowtie2\n\n\ndef _tophat_major_version(config):\n cmd = [\n utils.get_program_python(\"tophat\"),\n config_utils.get_program(\"tophat\", config, default=\"tophat\"),\n \"--version\"\n ]\n\n # tophat --version returns strings like this: Tophat v2.0.4\n version_string = str(subprocess.check_output(cmd)).strip().split()[1]\n major_version = int(version_string.split(\".\")[0][1:])\n return major_version\n\n\ndef _ref_version(ref_file):\n for ext in [os.path.splitext(x)[1] for x in glob.glob(ref_file + \"*\")]:\n if ext == \".ebwt\":\n return 1\n elif ext == \".bt2\":\n return 2\n raise ValueError(\"Cannot detect which reference version %s is. \"\n \"Should end in either .ebwt (bowtie) or .bt2 \"\n \"(bowtie2).\" % (ref_file))\n\ndef fix_insert_size(in_bam, config):\n \"\"\"\n Tophat sets PI in the RG to be the inner distance size, but the SAM spec\n states should be the insert size. This fixes the RG in the alignment\n file generated by Tophat header to match the spec\n \"\"\"\n fixed_file = os.path.splitext(in_bam)[0] + \".pi_fixed.bam\"\n if file_exists(fixed_file):\n return fixed_file\n header_file = os.path.splitext(in_bam)[0] + \".header.sam\"\n read_length = bam.estimate_read_length(in_bam)\n bam_handle= bam.open_samfile(in_bam)\n header = bam_handle.header.copy()\n rg_dict = header['RG'][0]\n if 'PI' not in rg_dict:\n return in_bam\n PI = int(rg_dict.get('PI'))\n PI = PI + 2*read_length\n rg_dict['PI'] = PI\n header['RG'][0] = rg_dict\n with pysam.Samfile(header_file, \"wb\", header=header) as out_handle:\n with bam.open_samfile(in_bam) as in_handle:\n for record in in_handle:\n out_handle.write(record)\n shutil.move(header_file, fixed_file)\n return fixed_file\n" ]
[ [ "numpy.median" ] ]
ewulczyn/ewulczyn.github.io
[ "8e77b3a9a6a484696678f5b937b12f5742c05e99" ]
[ "ipython/How_Naive_AB_Testing_Goes_Wrong/abstract_abtest.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom abtest_util import SimStream\nfrom abc import ABCMeta, abstractmethod\n\nclass ABTest(object):\n \"\"\"\n This is the base class for dynamically\n terminating AB tests. The idea is that you define\n a stopping crietion and evaluate it every n records\n until you get the stop signal.\n \"\"\"\n __metaclass__ = ABCMeta\n def __init__(self, a_stream, b_stream, test_interval, max_run):\n self.a_stream = a_stream # a banner data stream object for banner A\n self.b_stream = b_stream # a banner data stream object for banner B\n self.max_run = max_run # the maximum number of samples per banner\n self.test_interval = test_interval # evalaute stopping criterion every test_interval records\n self.a_estimator = None # an object that collects stats on banner A\n self.b_estimator = None # an object that collects stats on banner B\n self.has_run = False # flag to see if the test has already been run once\n\n\n def run(self):\n \"\"\"\n This function runs the banners for test_interval records\n Until the evaluate_stopping_criterium function returns a winner\n or the maximum sample size is reached \n \"\"\"\n if self.has_run:\n print (\"This test already ran\")\n return\n\n while True:\n a_records = self.a_stream.get_next_records(self.test_interval)\n b_records = self.b_stream.get_next_records(self.test_interval)\n self.a_estimator.update(a_records)\n self.b_estimator.update(b_records)\n result = self.evaluate_stopping_criterium()\n if result != 'continue':\n self.has_run = True\n return result\n\n @abstractmethod\n def evaluate_stopping_criterium(self):\n \"\"\"\n Each child class needs to define a criterion for stopping the test\n \"\"\"\n pass\n\n\ndef expected_results(TestClass, params, iters):\n \"\"\"\n Evaluates a test with the same parameters multiple times\n to get the expected results.\n\n Args:\n TestClass: AB Test Class\n params: parmaters for instantiating AB Test class\n iters: number of times to run the Test object with the set of params\n\n Returns:\n Prob(A wins), P(unkown winner), list of run times\n\n \"\"\"\n num_choose_A = 0.0\n unknown_count = 0.0\n run_times = []\n\n for i in range(iters):\n t = TestClass(*params)\n result = t.run()\n\n if result == 'A':\n num_choose_A += 1\n elif result == 'unknown':\n unknown_count += 1\n run_times.append(max(t.a_estimator.N, t.b_estimator.N))\n\n return num_choose_A/iters, unknown_count/iters, np.array(run_times)\n\n\ndef expected_results_by_lift(TestClass, params, iters, p_hat, lifts, fig_name=None):\n\n \"\"\"\n This function generates plots that show the expected results\n of the AB test as you change the lift that banner A has over\n banner B.\n \"\"\"\n\n # see how you would do in practice\n run_times_list = []\n p_A_betters = {\"lower\": [], \"upper\":[], \"mean\": []}\n p_unknowns = {\"lower\": [], \"upper\":[], \"mean\": []}\n\n (lower, mean, upper) = p_hat.p_donate_ci(10)\n\n for lift in lifts:\n print (lift)\n #lower\n p_B = p_hat.change_p_donate(lower)\n params[0] = SimStream(p_B.lift(lift)) #a_stream\n params[1] = SimStream(p_B) #b_stream\n p_better, p_unknown, time = expected_results(TestClass, params, iters)\n p_A_betters['lower'].append(p_better)\n p_unknowns['lower'].append(p_unknown)\n\n # mean\n p_B = p_hat\n params[0] = SimStream(p_B.lift(lift)) #a_stream\n params[1] = SimStream(p_B) #b_stream\n p_better, p_unknown, time = expected_results(TestClass, params, iters)\n run_times_list.append(time)\n p_A_betters['mean'].append(p_better)\n 
p_unknowns['mean'].append(p_unknown)\n\n #upper\n p_B = p_hat.change_p_donate(upper)\n params[0] = SimStream(p_B.lift(lift)) #a_stream\n params[1] = SimStream(p_B) #b_stream\n p_better, p_unknown, time = expected_results(TestClass, params, iters)\n p_A_betters['upper'].append(p_better)\n p_unknowns['upper'].append(p_unknown)\n\n lifts = np.array(lifts)*100\n\n avg_run_times = np.array([np.mean(run_times) for run_times in run_times_list])\n lower = [np.percentile(run_times, 5) for run_times in run_times_list]\n upper = [np.percentile(run_times, 95) for run_times in run_times_list]\n\n\n fig = plt.figure(figsize=(13, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n data = zip(lifts, p_A_betters['mean'], p_unknowns['mean'], avg_run_times)\n columns = ['% lift A over B', 'P(Choosing A) Median', 'P(Unknown) Median', 'Avg Time']\n df = pd.DataFrame.from_records(data, columns=columns)\n\n ax1.set_ylim([-0.1, 1.1])\n ax1.plot(lifts, p_A_betters['mean'], label='P(A wins) median')\n ax1.plot(lifts, p_A_betters['lower'], label='P(A wins) lower', alpha=0.31)\n ax1.plot(lifts, p_A_betters['upper'], label='P(A wins) upper', alpha=0.31)\n\n ax1.plot(lifts, p_unknowns['mean'], label='P(unknown)')\n ax1.set_xlabel('percent lift')\n ax1.set_ylabel('probability')\n ax1.legend(loc=7)\n\n ax2.set_xlim([lifts[0], lifts[-1]])\n ax2.plot(lifts, avg_run_times, label='avg time')\n ax2.fill_between(lifts, lower, upper, alpha=0.31, edgecolor='#3F7F4C', facecolor='0.75', linewidth=0)\n ax2.set_xlabel('percent lift')\n ax2.set_ylabel('sample size')\n ax2.legend(loc=1)\n plt.show()\n if fig_name:\n fig.savefig(fig_name)\n\n return df\n\n\n\n\n\ndef expected_results_by_interval(TestClass, params, iters, p_hat, lifts, n1, n2, n3, fig_name=None):\n\n \"\"\"\n This function generates plots that show the expected results\n of the AB test as you change the lift that banner A has over\n banner B.\n \"\"\"\n\n # see how you would do in practice\n run_times_list = {\"lower\": [], \"upper\":[], \"mean\": []}\n p_A_betters = {\"lower\": [], \"upper\":[], \"mean\": []}\n\n (lower, mean, upper) = p_hat.p_donate_ci(10)\n\n for lift in lifts:\n print (lift)\n\n p_B = p_hat\n\n # mean\n new_params = list(params)\n new_params[0] = SimStream(p_B.lift(lift)) #a_stream\n new_params[1] = SimStream(p_B) #b_stream\n new_params[2] = n1\n p_better, p_unknown, time = expected_results(TestClass, new_params, iters)\n run_times_list['mean'].append(time)\n p_A_betters['mean'].append(p_better)\n\n\n #lower\n new_params = list(params)\n new_params[0] = SimStream(p_B.lift(lift)) #a_stream\n new_params[1] = SimStream(p_B) #b_stream\n new_params[2] = n2\n p_better, p_unknown, time = expected_results(TestClass, new_params, iters)\n p_A_betters['lower'].append(p_better)\n run_times_list['lower'].append(time)\n \n\n #upper\n new_params = list(params)\n new_params[0] = SimStream(p_B.lift(lift)) #a_stream\n new_params[1] = SimStream(p_B) #b_stream\n new_params[2] = n3\n p_better, p_unknown, time = expected_results(TestClass, new_params, iters)\n p_A_betters['upper'].append(p_better)\n run_times_list['upper'].append(time)\n\n lifts = np.array(lifts)*100\n\n avg_run_times_mean = np.array([np.mean(run_times) for run_times in run_times_list['mean']])\n avg_run_times_upper = np.array([np.mean(run_times) for run_times in run_times_list['upper']])\n avg_run_times_lower = np.array([np.mean(run_times) for run_times in run_times_list['lower']])\n\n\n fig = plt.figure(figsize=(13, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n\n\n 
ax1.set_ylim([-0.1, 1.1])\n ax1.plot(lifts, p_A_betters['lower'], label='P(A wins) n = %d' % n1)\n ax1.plot(lifts, p_A_betters['mean'], label='P(A wins) n = %d' % n2)\n ax1.plot(lifts, p_A_betters['upper'], label='P(A wins) n = %d' % n3)\n\n ax1.set_xlabel('percent lift')\n ax1.set_ylabel('probability of choosing A')\n ax1.legend(loc=4)\n\n ax2.set_xlim([lifts[0], lifts[-1]])\n ax2.plot(lifts, avg_run_times_lower, label='n = %d'% n1)\n ax2.plot(lifts, avg_run_times_mean, label='n = %d'% n2)\n ax2.plot(lifts, avg_run_times_upper, label='n = %d' % n3)\n\n ax2.set_xlabel('percent lift')\n ax2.set_ylabel('sample size')\n ax2.legend(loc=1)\n plt.show()\n if fig_name:\n fig.savefig(fig_name)\n\n" ]
[ [ "numpy.percentile", "numpy.mean", "pandas.DataFrame.from_records", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
janzmazek/langerhansGUI
[ "aa0f3f1636f96964f2172383aadb734a71bc4885" ]
[ "langerhansGUI/view.py" ]
[ "import tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\nfrom tkinter.ttk import Progressbar\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport copy\nimport webbrowser\n\n\n# Window parameters\nWIDTH = 1000\nHEIGHT = 600\n\nWHITE = \"white\"\nTEXT = \"white\"\n# BG = \"#2d2f37\"\nBG = \"#3E4149\"\n\nWELCOME_TEXT = \"WELCOME\\n \\\nTo start analyzing, load a data file (a 2-D matrix readable by np.loadtxt).\\n \\\nCells should be rows of the matrix, time should be columns of the matrix.\\n \\\nYou can save or load current state in the form of a pickle object.\"\n\n\nclass View(tk.Tk):\n \"\"\"docstring for View.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(View, self).__init__(*args, **kwargs)\n self.title(\"Analysis of Calcium Signals\")\n\n self.controller = None\n\n def register(self, controller):\n self.controller = controller\n\n def configure(self):\n menubar = tk.Menu(self)\n\n importmenu = tk.Menu(menubar, tearoff=0)\n exportmenu = tk.Menu(menubar, tearoff=0)\n editmenu = tk.Menu(menubar, tearoff=0)\n aboutmenu = tk.Menu(menubar, tearoff=0)\n\n menubar.add_cascade(label=\"Import\", menu=importmenu)\n importmenu.add_command(label=\"Import data\",\n command=self.controller.import_data\n )\n importmenu.add_command(label=\"Import settings\",\n command=self.controller.import_settings\n )\n importmenu.add_command(label=\"Import excluded\",\n command=self.controller.import_excluded\n )\n importmenu.add_command(label=\"Import object (pickle)\",\n command=self.controller.import_object\n )\n menubar.add_cascade(label=\"Export\", menu=exportmenu)\n exportmenu.add_command(label=\"Export settings\",\n command=self.controller.save_settings\n )\n exportmenu.add_command(label=\"Export image\",\n command=self.controller.save_image\n )\n exportmenu.add_command(label=\"Export event plot\",\n command=self.controller.save_eventplot\n )\n exportmenu.add_command(label=\"Export excluded\",\n command=self.controller.save_excluded\n )\n exportmenu.add_command(label=\"Export object (pickle)\",\n command=self.controller.save_object\n )\n menubar.add_cascade(label=\"Edit\", menu=editmenu)\n editmenu.add_command(label=\"Settings\",\n command=self.controller.edit_settings\n )\n menubar.add_cascade(label=\"About\", menu=aboutmenu)\n aboutmenu.add_command(label=\"Info\",\n command=lambda: webbrowser.open(\n \"https://github.com/janzmazek/cell-networks\"\n )\n )\n\n self.config(menu=menubar)\n\n # ------------------------------ TOOLBAR ---------------------------- #\n\n self.toolbar = tk.LabelFrame(self, text=\"Preprocessing Tools\",\n padx=5, pady=5, bg=BG, fg=TEXT\n )\n self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.NO)\n\n topframe = tk.Frame(self.toolbar, bg=BG)\n topframe.pack(anchor=\"center\")\n\n flt_button = tk.Button(topframe, highlightbackground=BG,\n text=\"Filter\",\n command=self.controller.filter_click\n )\n flt_button.pack(side=tk.LEFT)\n\n dst_button = tk.Button(topframe, highlightbackground=BG,\n text=\"Compute distributions\",\n command=self.controller.distributions_click\n )\n dst_button.pack(side=tk.LEFT)\n\n bin_button = tk.Button(topframe, highlightbackground=BG,\n text=\"Binarize\",\n command=self.controller.binarize_click\n )\n bin_button.pack(side=tk.LEFT)\n\n aex_button = tk.Button(topframe, highlightbackground=BG,\n text=\"Autoexclude\",\n command=self.controller.autoexclude_click\n )\n aex_button.pack(side=tk.LEFT)\n\n alim_button = tk.Button(topframe, highlightbackground=BG,\n text=\"Autolimit\",\n 
command=self.controller.autolimit_click\n )\n alim_button.pack(side=tk.LEFT)\n\n # ------------------------------ CANVAS ----------------------------- #\n\n self.canvas = tk.Canvas(self)\n self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n middleframe = tk.Frame(self.canvas,\n bg=BG, borderwidth=5, relief=tk.RAISED\n )\n middleframe.pack(side=tk.TOP, fill=\"none\", expand=True)\n\n text = tk.Label(middleframe, bg=BG, fg=WHITE, text=WELCOME_TEXT)\n text.pack(anchor=\"center\", padx=10, pady=10)\n\n data_button = tk.Button(middleframe, highlightbackground=BG,\n text=\"Import Data\",\n command=self.controller.import_data\n )\n data_button.pack(side=tk.LEFT, padx=20, pady=20)\n\n object_button = tk.Button(middleframe, highlightbackground=BG,\n text=\"Import Object\",\n command=self.controller.import_object\n )\n object_button.pack(side=tk.RIGHT, padx=20, pady=20)\n # ------------------------------ NAVBAR ----------------------------- #\n\n self.navbar = tk.LabelFrame(self, text=\"Navigation\",\n padx=5, pady=5, bg=BG, fg=TEXT\n )\n self.navbar.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=tk.NO)\n\n exclude_button = tk.Button(self.navbar, highlightbackground=BG,\n text=\"(↓) exclude\",\n command=self.controller.exclude_click\n )\n exclude_button.pack(side=tk.LEFT)\n\n bottomframe = tk.Frame(self.navbar, bg=BG)\n bottomframe.pack(side=tk.LEFT, fill=\"none\", expand=True)\n\n prev_button = tk.Button(bottomframe, highlightbackground=BG,\n text=\"(←) Prev\",\n command=self.controller.previous_click\n )\n prev_button.pack(side=tk.LEFT)\n\n self.cell_number_text = tk.Label(bottomframe, bg=BG, fg=TEXT, text=\"0\")\n self.cell_number_text.pack(side=tk.LEFT)\n\n next_button = tk.Button(bottomframe, highlightbackground=BG,\n text=\"Next (→)\",\n command=self.controller.next_click\n )\n next_button.pack(side=tk.LEFT)\n\n unex_button = tk.Button(self.navbar, highlightbackground=BG,\n text=\"unexclude (↑)\",\n command=self.controller.unexclude_click\n )\n unex_button.pack(side=tk.RIGHT)\n\n self.bind(\"<Left>\", lambda e: self.controller.previous_click())\n self.bind(\"<Right>\", lambda e: self.controller.next_click())\n self.bind(\"<Up>\", lambda e: self.controller.unexclude_click())\n self.bind(\"<Down>\", lambda e: self.controller.exclude_click())\n\n self.minsize(width=WIDTH, height=HEIGHT)\n\n self.progressbar = Progressbar(self, orient=tk.HORIZONTAL,\n length=100, mode='determinate'\n )\n self.progressbar.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=tk.NO)\n\n def open_file(self):\n filename = filedialog.askopenfilename(\n title=\"Select file\",\n filetypes=(\n (\"txt files\", \"*.txt\"),\n (\"YAML files\", \"*.yaml\"),\n (\"pickle files\", \"*.pkl\")\n )\n )\n if filename == '':\n return None\n return filename\n\n def open_directory(self):\n \"\"\"\n This method displays the file dialog box to open file and returns the\n file name.\n \"\"\"\n directory = filedialog.askdirectory()\n if directory == '':\n return None\n return directory\n\n def save_as(self, extension):\n filename = filedialog.asksaveasfile(\n mode='w', defaultextension=extension\n )\n if filename is None:\n return None\n return filename.name\n\n def draw_fig(self, fig):\n if type(self.canvas) == tk.Canvas:\n self.canvas.destroy()\n else:\n self.canvas.get_tk_widget().destroy()\n self.canvas = FigureCanvasTkAgg(fig, master=self)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n def open_settings_window(self, settings):\n # Open window\n self.settings_window = tk.Toplevel()\n 
self.settings_window.title(\"Settings\")\n\n # Add upper frame\n main_frame = tk.Frame(self.settings_window, bg=BG)\n main_frame.pack(fill=tk.BOTH, expand=tk.YES)\n\n self.entries = self.__add_frame(settings, main_frame)\n\n apply_parameters_button = tk.Button(\n main_frame, highlightbackground=BG, text=\"Apply parameters\",\n command=self.controller.apply_parameters_click\n )\n apply_parameters_button.pack(\n side=tk.BOTTOM, fill=tk.BOTH, expand=tk.YES, padx=5, pady=5\n )\n\n def __add_frame(self, parameter, container):\n if type(parameter) in (int, float):\n e = tk.Entry(container)\n e.pack(side=tk.LEFT)\n e.delete(0, tk.END)\n e.insert(0, parameter)\n return e\n elif type(parameter) is dict:\n dictionary = {}\n for key in parameter:\n parameter_frame = tk.LabelFrame(\n container, text=key, bg=BG, fg=TEXT\n )\n parameter_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)\n dictionary[key] = self.__add_frame(\n parameter[key], parameter_frame\n )\n return dictionary\n elif type(parameter) is list:\n array = []\n for key in range(len(parameter)):\n array.append(self.__add_frame(parameter[key], container))\n return array\n\n def update_progressbar(self, i):\n self.progressbar[\"value\"] = i\n" ]
[ [ "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
mrbermell/ffai
[ "8b70faf615fc70eb40aa8b3519a7d2339872ea15" ]
[ "tests/performance/run_env.py" ]
[ "#!/usr/bin/env python3\n\nimport gym\nimport numpy as np\nimport botbowl\nfrom botbowl.core.game import Game\nfrom botbowl.core.model import Agent\n\nimport cProfile\nimport io\nimport pstats\n\n\ndef profile_and_print_result(function, sortkey=\"tottime\"):\n \"\"\"\n Choose sortkey from: 'ncalls', 'tottime', 'percall', 'cumtime', 'percall', and others\n \"\"\"\n pr = cProfile.Profile()\n\n pr.enable()\n function()\n pr.disable()\n\n s = io.StringIO()\n ps = pstats.Stats(pr, stream=s).sort_stats(sortkey)\n ps.print_stats(50)\n print(s.getvalue())\n\n\ndef run_env(n, enable_forward_model, env_name=\"botbowl-11-v3\"):\n env = gym.make(env_name)\n\n seed = 0\n env.seed(0)\n rnd = np.random.RandomState(seed)\n\n for _ in range(n):\n env.reset()\n if enable_forward_model:\n env.game.enable_forward_model()\n done = False\n\n while not done:\n _, _, done, _ = env.step(get_random_action_from_env(env, rnd))\n\n\ndef get_random_action_from_env(env, random_state):\n action_types = env.available_action_types()\n action_type = random_state.choice(action_types)\n\n available_positions = env.available_positions(action_type)\n pos = random_state.choice(available_positions) if len(available_positions) > 0 else None\n\n return {'action-type': action_type,\n 'x': pos.x if pos is not None else None,\n 'y': pos.y if pos is not None else None}\n\n\ndef run_game(nbr_of_games, enable_forward_model):\n config = botbowl.load_config(\"gym-11\")\n config.fast_mode = True\n ruleset = botbowl.load_rule_set(config.ruleset)\n home = botbowl.load_team_by_filename(\"human\", ruleset)\n away = botbowl.load_team_by_filename(\"human\", ruleset)\n away_agent = Agent(\"Human 1\", human=True, agent_id=1)\n home_agent = Agent(\"Human 2\", human=True, agent_id=2)\n\n seed = 0\n random_agent = botbowl.make_bot('random')\n random_agent.rnd = np.random.RandomState(seed)\n\n for _ in range(nbr_of_games):\n game = Game(seed, home, away, home_agent, away_agent, config)\n game.init()\n if enable_forward_model:\n game.enable_forward_model()\n while not game.state.game_over:\n game.step(random_agent.act(game))\n\n\nif __name__ == \"__main__\":\n nbr_of_games = 10\n\n print(f\"---- Game played {nbr_of_games} times - forward model disabled ------\")\n profile_and_print_result(function=lambda: run_game(nbr_of_games, enable_forward_model=False), sortkey=\"tottime\")\n\n print(f\"---- Game played {nbr_of_games} times - forward model enabled ------\")\n profile_and_print_result(function=lambda: run_game(nbr_of_games, enable_forward_model=True), sortkey=\"tottime\")\n\n print(f\"---- Gym played {nbr_of_games} times - forward model disabled ------\")\n profile_and_print_result(function=lambda: run_env(nbr_of_games, enable_forward_model=False), sortkey=\"tottime\")\n\n\n\n" ]
[ [ "numpy.random.RandomState" ] ]
nesl/DeepCEP_DAIS
[ "04dbfefe3c42d74836035a73bfbcd51eebc7992d" ]
[ "detection.py" ]
[ "import os\nimport time\nimport cv2\nimport numpy as np\nfrom yolo.yolo_model import YOLO\n\ndef process_image(img):\n \"\"\"Resize, reduce and expand image.\n\n # Argument:\n img: original image.\n\n # Returns\n image: ndarray(64, 64, 3), processed image.\n \"\"\"\n image = cv2.resize(img, (416, 416), interpolation=cv2.INTER_CUBIC)\n image = np.array(image, dtype='float32')\n image /= 255.\n image = np.expand_dims(image, axis=0)\n\n return image\n\n\ndef get_classes(file):\n \"\"\"Get classes name.\n\n # Argument:\n file: classes name for database.\n\n # Returns\n class_names: List, classes name.\n\n \"\"\"\n with open(file) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n\n return class_names\n\n\ndef draw(image, boxes, scores, classes, all_classes):\n \"\"\"Draw the boxes on the image.\n\n # Argument:\n image: original image.\n boxes: ndarray, boxes of objects.\n classes: ndarray, classes of objects.\n scores: ndarray, scores of objects.\n all_classes: all classes name.\n \"\"\"\n for box, score, cl in zip(boxes, scores, classes):\n x, y, w, h = box\n\n top = max(0, np.floor(x + 0.5).astype(int))\n left = max(0, np.floor(y + 0.5).astype(int))\n right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))\n bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))\n\n cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)\n cv2.putText(image, '{0} {1:.2f}'.format(all_classes[cl], score),\n (top, left - 6),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.6, (0, 0, 255), 1,\n cv2.LINE_AA)\n\n# print('class: {0}, score: {1:.2f}'.format(all_classes[cl], score))\n# print('box coordinate x,y,w,h: {0}'.format(box))\n\n print()\n\n\ndef detect_image(image, yolo, all_classes):\n \"\"\"Use yolo v3 to detect images.\n\n # Argument:\n image: original image.\n yolo: YOLO, yolo model.\n all_classes: all classes name.\n\n # Returns:\n image: processed image.\n \"\"\"\n pimage = process_image(image)\n\n start = time.time()\n boxes, classes, scores = yolo.predict(pimage, image.shape)\n end = time.time()\n\n# print('time: {0:.2f}s'.format(end - start))\n\n if boxes is not None:\n draw(image, boxes, scores, classes, all_classes)\n\n return image, boxes, classes, scores\n\n\ndef load_yolo_model():\n print('#########################################')\n print('Loading YOLO object detection model......')\n print('#########################################\\n')\n yolo = YOLO(0.6, 0.5)\n file = 'yolo/coco_classes.txt'\n all_classes = get_classes(file)\n print('#########################################')\n print('Yolo Loaded!......')\n print('#########################################\\n')\n return yolo, all_classes\n\ndef object_detector(image, yolo, all_classes):\n # input\n image, boxes, classes, scores = detect_image(image, yolo, all_classes)\n \n # only output the detected object with largest confidence.\n if scores is None:\n return 'none'\n else:\n out_id = scores.argmax()\n print(all_classes[classes[out_id]])\n return all_classes[classes[out_id]]\n \n \nif __name__ == '__main__':\n image_path = 'cam1.jpg'\n image = cv2.imread(image_path)\n yolo, all_classes = load_yolo_model()\n\n object_detector(image, yolo, all_classes)\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.floor" ] ]
thompson318/arucochristmas
[ "0cf6e2f646ae4d0380aa1330b155418db2449ad1" ]
[ "arucochristmas/aruco.py" ]
[ "\"\"\" Module to provide the aruco tracking logic \"\"\"\n#pylint:disable=import-error\nimport numpy as np\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nfrom sksurgeryarucotracker.arucotracker import ArUcoTracker\n\n\ndef init_camera_and_tracker():\n \"\"\"\n Initialises an sksurgeryarucotracker object and\n and picamera capture object\n\n :returns: The initialised arucotracker and picamera objects\n \"\"\"\n config = {\n \"video source\" : 'none',\n \"debug\" : False,\n \"aruco dictionary\" : \"DICT_4X4_50\"\n }\n artracker = ArUcoTracker(config)\n\n artracker.start_tracking()\n\n camera = PiCamera()\n camera.resolution = (640, 480)\n\n return artracker, camera\n\ndef get_marker_pos(artracker, camera):\n \"\"\"\n Get's a single frame from the camera object, and uses the\n artracker object to detect and track an aruco tag\n \"\"\"\n capture = PiRGBArray(camera)\n camera.capture(capture, format='bgr')\n image = capture.array\n (_port_handles, _timestamps, _framenumbers,\n tracking, _quality) = artracker.get_frame(image)\n x_ord = None\n y_ord = None\n got_frame = False\n if tracking:\n #pylint:disable=unsubscriptable-object\n x_ord = np.mean(np.array(tracking).flat[3::16])\n y_ord = np.mean(np.array(tracking).flat[7::16])\n got_frame = True\n\n return got_frame, x_ord, y_ord\n" ]
[ [ "numpy.array" ] ]
alfa-th/lang-modeler-pytorch
[ "a13e94841df9fc3996b33a93d0a58a99c0596359" ]
[ "transformer/main.py" ]
[ "import time\nimport math\nimport copy\n\nfrom torchtext.datasets import WikiText2\nimport torch.nn as nn\nimport torch\n\nfrom functions import evaluate, train\nfrom dataset import get_vocab, get_processed_data, get_tokenizer\nfrom classes.transformer_model import TransformerModel\n\ntokenizer = get_tokenizer(\"basic_english\")\nvocab = get_vocab(WikiText2(split=\"train\"), tokenizer)\ndevice = torch.device(\"cpu\")\n\nntokens = len(vocab)\nlr = 0.5\nepoch = 3\nbptt = 2\nbatch_size = 10\ndevice = torch.device(\"cpu\")\ntrain_data, val_data, test_data = get_processed_data(\n WikiText2(), batch_size, device, vocab, tokenizer)\n\nmodel = TransformerModel(\n ntoken=ntokens, # Size of vocab\n d_model=200, # Embedding dimension\n d_hid=200, # Dimension of hidden feedforward network in nn.TransformerEncoder\n nhead=2, # Number of heads in nn.MultiHeadAttention\n nlayers=2, # Number of nn.TransformerEncoderLayer in nn.TransformerEncoder\n dropout=0.2, # Dropout probability\n)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95)\n\nepoch = 3\nbest_val_loss = float(\"inf\")\nbest_model = None\n\nprint(\"Training\")\n\nfor epoch in range(1, epoch + 1):\n epoch_start_time = time.time()\n train(model, criterion, optimizer, scheduler,\n device, train_data, epoch, lr, bptt, ntokens)\n val_loss = evaluate(model, criterion, val_data, bptt, device, ntokens)\n val_ppl = math.exp(val_loss)\n elapsed = time.time() - epoch_start_time\n print(\n f\"{'=' * 89} \\n\"\n f\"| end of epoch {epoch:3d} | time: {elapsed:5.2f}s\"\n f\"| valid loss {val_loss:5.2f} | valid_ppl: {val_ppl:8.2f}s\"\n f\"{'=' * 89} \\n\"\n )\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_model = copy.deepcopy(model)\n\ntest_loss = evaluate(best_model, criterion, test_data, bptt, device, ntokens)\ntest_ppl = math.exp(test_loss)\nprint(\n f\"{'=' * 89}\"\n f\"| end of training | test loss {test_loss:5.2f} \"\n f\"| test ppl {test_ppl:8.2f} \"\n f\"{'=' * 89}\"\n)\n" ]
[ [ "torch.device", "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.StepLR" ] ]
mcsorkun/Genetic-Selection-Cheminformatics
[ "6f85cd26ffc0aa00b8c34c6d482a2a5f4de149ae" ]
[ "AqSolDB_example.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 26 18:35:38 2020\r\n\r\n@author: Murat Cihan Sorkun\r\n\r\nFeature Selection by Genetic Algorithm: An example on AqSolDB dataset (Aqueous Solubility) \r\n\"\"\"\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPRegressor\r\nimport genetic\r\nfrom rdkit import Chem\r\nimport pandas as pd\r\nimport mordred\r\nfrom mordred import Calculator, descriptors\r\n\r\n\r\ndef get_mordred_descriptors(smiles_list):\r\n \r\n calc = mordred.Calculator() \r\n \r\n calc.register(mordred.AtomCount) #16\r\n calc.register(mordred.RingCount) #139\r\n calc.register(mordred.BondCount) #9 \r\n calc.register(mordred.HydrogenBond) #2 \r\n calc.register(mordred.CarbonTypes) #10\r\n calc.register(mordred.SLogP) #2\r\n calc.register(mordred.Constitutional) #16 \r\n calc.register(mordred.TopoPSA) #2\r\n calc.register(mordred.Weight) #2\r\n calc.register(mordred.Polarizability) #2\r\n calc.register(mordred.McGowanVolume) #1\r\n \r\n name_list=[]\r\n for desc_name in calc.descriptors:\r\n name_list.append(str(desc_name))\r\n \r\n descriptors_list=[] \r\n for smiles in smiles_list:\r\n # print(smiles)\r\n mol=Chem.MolFromSmiles(smiles)\r\n mol=Chem.AddHs(mol)\r\n calculated_descriptors = calc(mol)\r\n descriptors_list.append(calculated_descriptors._values) \r\n \r\n descriptors_df=pd.DataFrame(descriptors_list,columns=name_list)\r\n descriptors_df = descriptors_df.select_dtypes(exclude=['object']) \r\n \r\n return descriptors_df\r\n\r\n \r\ndata_name = \"AqSolDB\" \r\ndata_df = pd.read_csv(\"data/\"+data_name+\".csv\")\r\n\r\nprint(\"\\nGenerating features from SMILES...\")\r\ndescriptors_data=get_mordred_descriptors(data_df[\"SMILES\"].values)\r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(descriptors_data.values, data_df[\"Solubility\"].values, train_size=0.75, test_size=0.25, random_state=0)\r\n\r\npopulation_size=20\r\nnum_of_generations=20\r\nmut_ratio=0.5\r\n\r\n#select features from genetic algorithm\r\nselected_features=genetic.select_features(X_train,y_train,population_size,num_of_generations,mut_ratio,\"reg\",verbose=1)\r\n\r\nX_train_selected=genetic.transform_data(X_train,selected_features)\r\nX_test_selected=genetic.transform_data(X_test,selected_features)\r\n\r\n\r\nprint(\"\\nTotal generated features:\",descriptors_data.shape[1])\r\nprint(\"\\nTotal selected features from genetic algorithm:\",X_train_selected.shape[1])\r\n\r\n\r\nprint(\"\\nTraining models by neural networks..\")\r\nmodel=MLPRegressor(activation='tanh', hidden_layer_sizes=(200), max_iter=200, solver='adam')\r\n\r\nmodel.fit(X_train, y_train)\r\ndefault_score=model.score(X_test, y_test)\r\nprint(\"\\nTest score(R2) without feature selection:\",default_score)\r\npredictions=model.predict(X_test)\r\n\r\nmodel.fit(X_train_selected, y_train)\r\ngenetic_score=model.score(X_test_selected, y_test)\r\nprint(\"Test score(R2) with genetic selection:\",genetic_score)\r\npredictions=model.predict(X_test_selected)\r\n\r\n\r\n\r\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.neural_network.MLPRegressor" ] ]
neurotechdk/fnirs-bci
[ "fa7d757dad2060b8acc6e64a51968707b45aa120" ]
[ "code/exp_bci_task.py" ]
[ "from sklearn.model_selection import KFold\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom helper_functions import *\r\nfrom helper_functions import preprocess, visualize_loss, show_plot\r\nimport warnings\r\nfrom icecream import ic\r\nic(\"Importing packages...\")\r\nwith warnings.catch_warnings():\r\n import mne\r\n import mne_nirs\r\n import numpy as np\r\n import pandas as pd\r\n import os\r\n import sys\r\n import matplotlib.pyplot as plt\r\n import getopt\r\n from tensorflow import keras\r\n from tensorflow.keras import metrics\r\n import tensorflow as tf\r\n import time\r\n\r\n import wandb\r\n from wandb.keras import WandbCallback\r\n\r\n import re\r\n import bcolors\r\n\r\n\r\ndef main():\r\n v = True\r\n\r\n config = {\r\n 'dropout': 0.5,\r\n 'dropout_2': 0.5,\r\n 'train_split': 0.6,\r\n \"learning_rate\": 0.00005,\r\n 'preprocess': \"medium\",\r\n 'batch_size': 24,\r\n 'epochs': 250,\r\n 'trainable': False,\r\n 'dense_units': 256,\r\n 'layers_transferred': 0, # [0, 1, 2, 3, 4]\r\n 'bci_task': \"data/snirf/bci_task_3_arithmetic_rotation.snirf\",\r\n 'n_augmentations': 10, # 0, 10, 50\r\n 'model': \"models/model-lstm.h5\",\r\n 'test_channel': 0,\r\n }\r\n\r\n wandb.init(\r\n project=\"thought_classification\", entity=\"esbenkran\",\r\n tags=[\"transfer_learning\", \"final\", \"extension\"], config=config)\r\n\r\n config = wandb.config\r\n\r\n raw_path = config.get(\"bci_task\")\r\n task_1 = re.findall(r'(?<=\\d_).*(?=_)', raw_path)[0]\r\n task_2 = re.findall(r'(?<=_)[a-z]{3,10}(?=.snirf)', raw_path)[0]\r\n\r\n try:\r\n pre_path = f\"data/datasets/{task_1}_{task_2}_{config.get('n_augmentations')}\"\r\n x_train = np.load(f\"{pre_path}_x_train.npy\")\r\n y_train = np.load(f\"{pre_path}_y_train.npy\")\r\n x_test = np.load(f\"{pre_path}_x_test.npy\")\r\n y_test = np.load(f\"{pre_path}_y_test.npy\")\r\n except:\r\n raise Exception(\r\n f\"{bcolors.FAIL}\\nNo preprocessed data found for {task_1}_{task_2} with {config.get('n_augmentations')} augmentations.\\n\\nPlease make a data/datasets directory and run code/generate_datasets.py first.\\n\\n{bcolors.ENDC}\")\r\n\r\n past = 39\r\n split_fraction = config.get(\"train_split\")\r\n date_time_key = \"time\"\r\n\r\n batch_size = config.get(\"batch_size\")\r\n dense_units = config.get(\"dense_units\")\r\n\r\n # Make each index repeat 39 times in Y\r\n y_train = np.repeat(y_train, past, axis=0)\r\n y_test = np.repeat(y_test, past, axis=0)\r\n\r\n if \"dense\" in config.get(\"model\"):\r\n dataset_train = keras.preprocessing.timeseries_dataset_from_array(\r\n x_train[:, config.get(\"test_channel\")].flatten(),\r\n y_train,\r\n shuffle=True,\r\n batch_size=batch_size,\r\n sequence_length=past,\r\n sequence_stride=past)\r\n\r\n dataset_val = keras.preprocessing.timeseries_dataset_from_array(\r\n x_test[:, config.get(\"test_channel\")].flatten(),\r\n y_test,\r\n shuffle=False,\r\n batch_size=batch_size,\r\n sequence_length=past,\r\n sequence_stride=past)\r\n elif \"lstm\" in config.get(\"model\"):\r\n dataset_train = keras.preprocessing.timeseries_dataset_from_array(\r\n x_train,\r\n y_train,\r\n shuffle=True,\r\n batch_size=batch_size,\r\n sequence_length=past,\r\n sequence_stride=past)\r\n\r\n dataset_val = keras.preprocessing.timeseries_dataset_from_array(\r\n x_test,\r\n y_test,\r\n shuffle=False,\r\n batch_size=batch_size,\r\n sequence_length=past,\r\n sequence_stride=past)\r\n\r\n if v:\r\n print(\r\n f\"Take batches out of the training dataset (currently {batch_size} samples)\")\r\n for 
batch in dataset_val.take(1):\r\n inputs, targets = batch\r\n\r\n print(\"Input shape:\", inputs.numpy().shape)\r\n print(\"Target shape:\", targets.numpy().shape, f\"{bcolors.ENDC}\")\r\n\r\n # print(f\"{bcolors.HEADER}Test set Y {y_test}{bcolors.ENDC}\")\r\n # print(f\"{bcolors.HEADER}Y in test set\", targets.numpy().flatten())\r\n # print(f\"{bcolors.HEADER}X in test set\", inputs.numpy())\r\n\r\n path_checkpoint = \"model_checkpoint.h5\"\r\n\r\n print(f\"{bcolors.ITALIC}Loading model...{config.get('model')}.{bcolors.ENDC}\")\r\n\r\n source_model = keras.models.load_model(config.get(\"model\"))\r\n model = keras.models.Sequential(source_model.layers[:-1])\r\n\r\n print(\r\n f\"{bcolors.ITALIC}The input layer is: {model.layers[0].input_shape}\\nand input actual is: {source_model.layers[0].output_shape}{bcolors.ENDC}\")\r\n\r\n units = 100\r\n dense_units = config.get(\"dense_units\")\r\n dropout = 0.5\r\n\r\n print(f\"{bcolors.ITALIC}Source model layers with {units} units (LSTM) or {dense_units} units (Dense) and dropout {dropout}.{bcolors.ENDC}\")\r\n\r\n # Reset all layers above layers_transferred\r\n for layer in range(len(model.layers)):\r\n # Layers transferred will be none, lstm1 (dense), lstm2 (dense), lstm3, lstm3+dense up to 4 [0, 1, 2, 3, 4]\r\n if layer not in list(range(config.get(\"layers_transferred\"))):\r\n if \"lstm\" in config.get(\"model\"):\r\n if \"de\" in model.layers[layer].name:\r\n if \"lstm-3\" in config.get(\"model\"):\r\n print(\r\n f\"{bcolors.HEADER}Resetting dense layer in LSTM-3 {model.layers[layer].name}{bcolors.ENDC}\")\r\n reset_weights(\r\n model.layers[layer], model, \"data/weights-dense-128-200-layer.npy\")\r\n elif \"lstm\" in config.get(\"model\"):\r\n print(\r\n f\"{bcolors.HEADER}Resetting dense layer in LSTM {model.layers[layer].name}{bcolors.ENDC}\")\r\n reset_weights(\r\n model.layers[layer], model, \"data/weights-dense-128-100-layer.npy\")\r\n elif \"bi\" in model.layers[layer].name:\r\n print(\r\n f\"{bcolors.HEADER}Resetting Bidirectional LSTM layer {model.layers[layer].name}{bcolors.ENDC}\")\r\n reset_weights(\r\n model.layers[layer], model, \"data/weights-lstm-bi-layer.npy\")\r\n else:\r\n print(\r\n f\"{bcolors.HEADER}Resetting LSTM uni layer {model.layers[layer].name}{bcolors.ENDC}\")\r\n reset_weights(\r\n model.layers[layer], model, \"data/weights-lstm-uni-layer.npy\")\r\n elif \"dense\" in config.get(\"model\"):\r\n print(\r\n f\"{bcolors.HEADER}Resetting dense layer {model.layers[layer].name}{bcolors.ENDC}\")\r\n reset_weights(model.layers[layer], model,\r\n \"data/weights-dense-128-39-layer.npy\")\r\n if not config.get(\"trainable\"):\r\n if layer in list(range(config.get(\"layers_transferred\"))):\r\n model.layers[layer].trainable = False\r\n\r\n model.add(keras.layers.Dense(\r\n dense_units, activation=\"relu\", name=\"de_transfer\"))\r\n model.add(keras.layers.Dense(1, activation=\"sigmoid\", name=\"de_output\"))\r\n model.summary()\r\n\r\n opt = keras.optimizers.Nadam(learning_rate=config.get(\"learning_rate\"))\r\n model.compile(loss=\"binary_crossentropy\",\r\n optimizer=opt,\r\n metrics=[\r\n 'binary_crossentropy',\r\n custom_binary_accuracy,\r\n f1\r\n ])\r\n\r\n es_callback = keras.callbacks.EarlyStopping(\r\n monitor=\"val_loss\", min_delta=0, patience=500, verbose=1, mode=\"max\")\r\n\r\n modelckpt_callback = keras.callbacks.ModelCheckpoint(\r\n monitor=\"binary_crossentropy\",\r\n filepath=path_checkpoint,\r\n verbose=1,\r\n save_weights_only=True,\r\n save_best_only=True,)\r\n\r\n history = model.fit(\r\n 
dataset_train,\r\n epochs=config.get(\"epochs\"),\r\n validation_data=dataset_val,\r\n callbacks=[es_callback, modelckpt_callback,\r\n WandbCallback(data_type=\"time series\")],\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.preprocessing.timeseries_dataset_from_array", "numpy.load", "numpy.repeat", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.models.Sequential" ] ]
phillipi/AMT_Real_vs_Fake
[ "4f5f25cdaffc8ee8c0a3ccd186f6ff45beb0c5c9" ]
[ "process_csv.py" ]
[ "\nimport numpy as np\nimport csv\nfrom collections import OrderedDict\nimport argparse\nfrom tqdm import tqdm\n\nfrom IPython import embed\n\ndef collect_csv_results(filename):\n\twith open(filename, newline='') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tfor rr,row in enumerate(reader):\n\t\t\tif(rr==0):\n\t\t\t\traw_dict = row\n\t\t\t\tfor key in row.keys():\n\t\t\t\t\traw_dict[key] = [raw_dict[key],]\n\t\t\telse:\n\t\t\t\tfor key in row.keys():\n\t\t\t\t\traw_dict[key].append(row[key])\n\n\treturn raw_dict\n\n\ndef read_csv(filename, N_practice, N_imgs):\n\traw_dict = collect_csv_results(filename)\n\n\tgt_prefix = 'Input.gt_side'\n\tans_prefix = 'Answer.selection'\n\n\tgts = []\n\tans = []\n\tfor nn in range(N_imgs):\n\t\tgts.append(1.*(np.array(raw_dict['%s%i'%(gt_prefix,nn)])=='right'))\n\t\tans.append(1.*(np.array(raw_dict['%s%i'%(ans_prefix,nn)])=='right'))\n\tgts = np.array(gts)\n\tans = 1-np.array(ans)\n\tN_turkers = gts.shape[1]\n\n\tdef get_method(in_string):\n\t\treturn ('/').join(in_string.split('/')[:-1])\n\n\tmleft_prefix = 'Input.images_left'\n\tmright_prefix = 'Input.images_right'\n\n\tmethods_left = []\n\tmethods_right = []\n\tfor nn in range(N_imgs):\n\t\tmethods_left.append([get_method(val) for val in raw_dict['%s%i'%(mleft_prefix,nn)]])\n\t\tmethods_right.append([get_method(val) for val in raw_dict['%s%i'%(mright_prefix,nn)]])\n\n\t# [np.sum(methods_left==method) for method in np.unique(methods_left)]\n\tall_method_names = np.unique(np.array(methods_left+methods_right))\n\tmethods_left = np.array(methods_left)\n\tmethods_right = np.array(methods_right)\n\n\ta = []\n\tfor method in all_method_names:\n\t\ta.append(np.sum(methods_left==method)+np.sum(methods_right==method))\n\n\tgt_method_name = all_method_names[np.argmax(a)]\n\tall_method_names = np.setdiff1d(all_method_names, gt_method_name)\n\t# all_method_names = np.setdiff1d(np.unique(np.array(methods_left+methods_right)), gt_method)\n\t# all_method_names = np.unique(np.array(methods_left+methods_right))\n\tmethod_nums = np.zeros((N_imgs, N_turkers))\n\tfor (mm,method) in enumerate(all_method_names):\n\t\tmethod_nums[(method==methods_left) + (method==methods_right)] = mm\n\n\treturn (gts[N_practice:], ans[N_practice:], method_nums[N_practice:], all_method_names, gt_method_name)\n\n\ndef calculate_results(gts, ans, method_nums):\n\tfools = []\n\tfor mm in range(int(np.max(method_nums)+1)):\n\t\tmask = (method_nums==mm)\n\t\tacc = np.mean((gts==ans)*mask)/(np.mean(mask)+.000001)\n\t\tfool = 1-acc\n\t\tfools.append(fool)\n\n\treturn fools\n\n\ndef bootstrap(gts, ans, method_nums):\n\tN,A = gts.shape\n\tgts_out = gts.copy()\n\tans_out = ans.copy()\n\tmethod_nums_out = method_nums.copy()\n\n\ta_inds = np.random.randint(A, size=A)\n\tn_inds = np.random.randint(N, size=(N,A))\n\tfor (aa,a_ind) in enumerate(a_inds):\n\t\taa\n\t\tgts_out[:,aa] = gts[n_inds[:,aa], a_ind]\n\t\tans_out[:,aa] = ans[n_inds[:,aa], a_ind]\n\t\tmethod_nums_out[:,aa] = method_nums[n_inds[:,aa], a_ind]\n\n\treturn gts_out, ans_out, method_nums_out\n\t\t\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-f','--filename', type=str, default='expt/results0.csv')\nparser.add_argument('--N_practice', type=int, default=10)\nparser.add_argument('--N_imgs', type=int, default=60)\nparser.add_argument('--N_bootstrap', type=int, default=10000)\n\nopt = parser.parse_args()\n\ngts, ans, method_nums, all_method_names, gt_method_name = read_csv(opt.filename, opt.N_practice, 
opt.N_imgs)\nfools = calculate_results(gts, ans, method_nums)\n\nprint('Turkers [%i], each do images [%i]'%(gts.shape[1], gts.shape[0]))\n\n# results\nprint('\\nMean')\nfor (mm,method) in enumerate(all_method_names):\n\tprint('%2.2f%% \\t[%s] (%i)'%(fools[mm]*100,method,np.sum(method_nums==mm)))\n\n\nprint('\\nBootstrapping')\nbootstrap_fools = []\nfor a in tqdm(range(opt.N_bootstrap)):\n\tbootstrap_fools.append(calculate_results(*bootstrap(gts, ans, method_nums)))\nbootstrap_fools = np.array(bootstrap_fools)\n\nfool_means = np.mean(bootstrap_fools, axis=0)\nfool_stds = np.std(bootstrap_fools, axis=0)\n\nfor (mm,method) in enumerate(all_method_names):\n\tprint('%2.2f\\t+/-\\t%2.2f%%\\t [%s]'%(fool_means[mm]*100,fool_stds[mm]*100,method))\n\nbetters = np.zeros((len(all_method_names),len(all_method_names)))\nfor (mm,method) in enumerate(all_method_names):\n\tprint('\\n[%s] >'%method)\n\tfor (nn,method2) in enumerate(all_method_names):\n\t\tbetters[mm,nn] = np.mean(bootstrap_fools[:,mm] > bootstrap_fools[:,nn]) + .5*np.mean(bootstrap_fools[:,mm]==bootstrap_fools[:,nn])\n\t\tif(mm!=nn):\n\t\t\tprint('\\t%02.1f%% \\t[%s]'%(betters[mm,nn]*100.,method2))\n\n\n\n" ]
[ [ "numpy.setdiff1d", "numpy.max", "numpy.std", "numpy.argmax", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
jalilm/SDN-Monitoring
[ "4ba8dd0f0ed5e44c0e803713d6c82ee2c815c7e4" ]
[ "util/get_percentage_plots.py" ]
[ "#!/usr/bin/env python\n\nimport glob\nimport os\n\nimport matplotlib.pyplot as plt\n\n\ndef main():\n res_per_k = {}\n for fn in glob.glob(os.path.expanduser('~/logs/') + '*-Topk-*'):\n if os.path.isfile(fn):\n # bfn = os.path.basename(fn)\n # mechanism, topk, rate, directory, timestep, hh, cm = bfn.split('-')\n with open(fn, \"r\") as f:\n f_res = f.readlines()\n for l in f_res:\n found = int(l.split(':')[1])\n k = int(l.split(':')[0].split(' ')[0])\n counters = int(l.split(':')[0].split(' ')[1])\n try:\n res_per_k[k][counters] = found\n except KeyError:\n res_per_k[k] = {counters: found}\n\n for k in res_per_k.keys():\n res = []\n counters = []\n sorted_res_per_k = sorted(res_per_k[k], key=lambda x: x)\n for counter in sorted_res_per_k:\n counters.append(counter)\n res.append(100.0 * (res_per_k[k][counter]) / k)\n plt.plot(counters, res, marker=(4, k % 4, 360 / k), label='k=' + str(k))\n plt.ylim((0, 100))\n plt.ylabel('percentage')\n plt.xlabel('counters')\n plt.title('Percentage of found top-k flows')\n plt.legend(numpoints=1)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
pavanchhatpar/AttnGAN
[ "9a4ac53aee9890ad488f11aeb913623d2c93a6b2" ]
[ "code/trainer.py" ]
[ "from __future__ import print_function\nfrom six.moves import range\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\n\nfrom PIL import Image\n\nfrom miscc.config import cfg\nfrom miscc.utils import mkdir_p\nfrom miscc.utils import build_super_images, build_super_images2\nfrom miscc.utils import weights_init, load_params, copy_G_params\nfrom model import G_DCGAN, G_NET\nfrom datasets import prepare_data\nfrom model import RNN_ENCODER, CNN_ENCODER\n\nfrom miscc.losses import words_loss\nfrom miscc.losses import discriminator_loss, generator_loss, KL_loss\nimport os\nimport time\nimport numpy as np\nimport sys\n\n# ################# Text to image task############################ #\nclass condGANTrainer(object):\n def __init__(self, output_dir, data_loader, n_words, ixtoword):\n if cfg.TRAIN.FLAG:\n self.model_dir = os.path.join(output_dir, 'Model')\n self.image_dir = os.path.join(output_dir, 'Image')\n mkdir_p(self.model_dir)\n mkdir_p(self.image_dir)\n\n torch.cuda.set_device(cfg.GPU_ID)\n cudnn.benchmark = True\n\n self.batch_size = cfg.TRAIN.BATCH_SIZE\n self.max_epoch = cfg.TRAIN.MAX_EPOCH\n self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL\n\n self.n_words = n_words\n self.ixtoword = ixtoword\n self.data_loader = data_loader\n self.num_batches = len(self.data_loader)\n\n def build_models(self):\n # ###################encoders######################################## #\n if cfg.TRAIN.NET_E == '':\n print('Error: no pretrained text-image encoders')\n return\n\n image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)\n img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')\n state_dict = \\\n torch.load(img_encoder_path, map_location=lambda storage, loc: storage)\n image_encoder.load_state_dict(state_dict)\n for p in image_encoder.parameters():\n p.requires_grad = False\n print('Load image encoder from:', img_encoder_path)\n image_encoder.eval()\n\n text_encoder = \\\n RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)\n state_dict = \\\n torch.load(cfg.TRAIN.NET_E,\n map_location=lambda storage, loc: storage)\n text_encoder.load_state_dict(state_dict)\n for p in text_encoder.parameters():\n p.requires_grad = False\n print('Load text encoder from:', cfg.TRAIN.NET_E)\n text_encoder.eval()\n\n # #######################generator and discriminators############## #\n netsD = []\n if cfg.GAN.B_DCGAN:\n if cfg.TREE.BRANCH_NUM ==1:\n from model import D_NET64 as D_NET\n elif cfg.TREE.BRANCH_NUM == 2:\n from model import D_NET128 as D_NET\n else: # cfg.TREE.BRANCH_NUM == 3:\n from model import D_NET256 as D_NET\n # TODO: elif cfg.TREE.BRANCH_NUM > 3:\n netG = G_DCGAN()\n netsD = [D_NET(b_jcu=False)]\n else:\n from model import D_NET64, D_NET128, D_NET256\n netG = G_NET()\n if cfg.TREE.BRANCH_NUM > 0:\n netsD.append(D_NET64())\n if cfg.TREE.BRANCH_NUM > 1:\n netsD.append(D_NET128())\n if cfg.TREE.BRANCH_NUM > 2:\n netsD.append(D_NET256())\n # TODO: if cfg.TREE.BRANCH_NUM > 3:\n netG.apply(weights_init)\n # print(netG)\n for i in range(len(netsD)):\n netsD[i].apply(weights_init)\n # print(netsD[i])\n print('# of netsD', len(netsD))\n #\n epoch = 0\n if cfg.TRAIN.NET_G != '':\n state_dict = \\\n torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)\n netG.load_state_dict(state_dict)\n print('Load G from: ', cfg.TRAIN.NET_G)\n istart = cfg.TRAIN.NET_G.rfind('_') + 1\n iend = cfg.TRAIN.NET_G.rfind('.')\n epoch = cfg.TRAIN.NET_G[istart:iend]\n epoch = int(epoch) + 1\n 
if cfg.TRAIN.B_NET_D:\n Gname = cfg.TRAIN.NET_G\n for i in range(len(netsD)):\n s_tmp = Gname[:Gname.rfind('/')]\n Dname = '%s/netD%d.pth' % (s_tmp, i)\n print('Load D from: ', Dname)\n state_dict = \\\n torch.load(Dname, map_location=lambda storage, loc: storage)\n netsD[i].load_state_dict(state_dict)\n # ########################################################### #\n if cfg.CUDA:\n text_encoder = text_encoder.cuda()\n image_encoder = image_encoder.cuda()\n netG.cuda()\n for i in range(len(netsD)):\n netsD[i].cuda()\n return [text_encoder, image_encoder, netG, netsD, epoch]\n\n def define_optimizers(self, netG, netsD):\n optimizersD = []\n num_Ds = len(netsD)\n for i in range(num_Ds):\n opt = optim.Adam(netsD[i].parameters(),\n lr=cfg.TRAIN.DISCRIMINATOR_LR,\n betas=(0.5, 0.999))\n optimizersD.append(opt)\n\n optimizerG = optim.Adam(netG.parameters(),\n lr=cfg.TRAIN.GENERATOR_LR,\n betas=(0.5, 0.999))\n\n return optimizerG, optimizersD\n\n def prepare_labels(self):\n batch_size = self.batch_size\n real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))\n fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))\n match_labels = Variable(torch.LongTensor(range(batch_size)))\n if cfg.CUDA:\n real_labels = real_labels.cuda()\n fake_labels = fake_labels.cuda()\n match_labels = match_labels.cuda()\n\n return real_labels, fake_labels, match_labels\n\n def save_model(self, netG, avg_param_G, netsD, epoch):\n backup_para = copy_G_params(netG)\n load_params(netG, avg_param_G)\n torch.save(netG.state_dict(),\n '%s/netG_epoch_%d.pth' % (self.model_dir, epoch))\n load_params(netG, backup_para)\n #\n for i in range(len(netsD)):\n netD = netsD[i]\n torch.save(netD.state_dict(),\n '%s/netD%d.pth' % (self.model_dir, i))\n print('Save G/Ds models.')\n\n def set_requires_grad_value(self, models_list, brequires):\n for i in range(len(models_list)):\n for p in models_list[i].parameters():\n p.requires_grad = brequires\n\n def save_img_results(self, netG, noise, sent_emb, words_embs, mask,\n image_encoder, captions, cap_lens,\n gen_iterations, name='current'):\n # Save images\n fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)\n for i in range(len(attention_maps)):\n if len(fake_imgs) > 1:\n img = fake_imgs[i + 1].detach().cpu()\n lr_img = fake_imgs[i].detach().cpu()\n else:\n img = fake_imgs[0].detach().cpu()\n lr_img = None\n attn_maps = attention_maps[i]\n att_sze = attn_maps.size(2)\n img_set, _ = \\\n build_super_images(img, captions, self.ixtoword,\n attn_maps, att_sze, lr_imgs=lr_img)\n if img_set is not None:\n im = Image.fromarray(img_set)\n fullpath = '%s/G_%s_%d_%d.png'\\\n % (self.image_dir, name, gen_iterations, i)\n im.save(fullpath)\n\n # for i in range(len(netsD)):\n i = -1\n img = fake_imgs[i].detach()\n region_features, _ = image_encoder(img)\n att_sze = region_features.size(2)\n _, _, att_maps = words_loss(region_features.detach(),\n words_embs.detach(),\n None, cap_lens,\n None, self.batch_size)\n img_set, _ = \\\n build_super_images(fake_imgs[i].detach().cpu(),\n captions, self.ixtoword, att_maps, att_sze)\n if img_set is not None:\n im = Image.fromarray(img_set)\n fullpath = '%s/D_%s_%d.png'\\\n % (self.image_dir, name, gen_iterations)\n im.save(fullpath)\n\n def train(self):\n text_encoder, image_encoder, netG, netsD, start_epoch = self.build_models()\n avg_param_G = copy_G_params(netG)\n optimizerG, optimizersD = self.define_optimizers(netG, netsD)\n real_labels, fake_labels, match_labels = self.prepare_labels()\n\n batch_size = self.batch_size\n nz 
= cfg.GAN.Z_DIM\n noise = Variable(torch.FloatTensor(batch_size, nz))\n fixed_noise = Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))\n if cfg.CUDA:\n noise, fixed_noise = noise.cuda(), fixed_noise.cuda()\n\n gen_iterations = 0\n # gen_iterations = start_epoch * self.num_batches\n for epoch in range(start_epoch, self.max_epoch):\n start_t = time.time()\n\n data_iter = iter(self.data_loader)\n step = 0\n while step < self.num_batches:\n # reset requires_grad to be trainable for all Ds\n # self.set_requires_grad_value(netsD, True)\n\n ######################################################\n # (1) Prepare training data and Compute text embeddings\n ######################################################\n data = data_iter.next()\n imgs, captions, cap_lens, class_ids, keys = prepare_data(data)\n\n hidden = text_encoder.init_hidden(batch_size)\n # words_embs: batch_size x nef x seq_len\n # sent_emb: batch_size x nef\n words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)\n words_embs, sent_emb = words_embs.detach(), sent_emb.detach()\n mask = (captions == 0)\n num_words = words_embs.size(2)\n if mask.size(1) > num_words:\n mask = mask[:, :num_words]\n\n #######################################################\n # (2) Generate fake images\n ######################################################\n noise.data.normal_(0, 1)\n fake_imgs, _, mu, logvar = netG(noise, sent_emb, words_embs, mask)\n\n #######################################################\n # (3) Update D network\n ######################################################\n errD_total = 0\n D_logs = ''\n for i in range(len(netsD)):\n netsD[i].zero_grad()\n errD = discriminator_loss(netsD[i], imgs[i], fake_imgs[i],\n sent_emb, real_labels, fake_labels)\n # backward and update parameters\n errD.backward()\n optimizersD[i].step()\n errD_total += errD\n D_logs += 'errD%d: %.2f ' % (i, errD.data[0])\n\n #######################################################\n # (4) Update G network: maximize log(D(G(z)))\n ######################################################\n # compute total loss for training G\n step += 1\n gen_iterations += 1\n\n # do not need to compute gradient for Ds\n # self.set_requires_grad_value(netsD, False)\n netG.zero_grad()\n errG_total, G_logs = \\\n generator_loss(netsD, image_encoder, fake_imgs, real_labels,\n words_embs, sent_emb, match_labels, cap_lens, class_ids)\n kl_loss = KL_loss(mu, logvar)\n errG_total += kl_loss\n G_logs += 'kl_loss: %.2f ' % kl_loss.data[0]\n # backward and update parameters\n errG_total.backward()\n optimizerG.step()\n for p, avg_p in zip(netG.parameters(), avg_param_G):\n avg_p.mul_(0.999).add_(0.001, p.data)\n\n if gen_iterations % 100 == 0:\n print(D_logs + '\\n' + G_logs)\n # save images\n if gen_iterations % 1000 == 0:\n backup_para = copy_G_params(netG)\n load_params(netG, avg_param_G)\n self.save_img_results(netG, fixed_noise, sent_emb,\n words_embs, mask, image_encoder,\n captions, cap_lens, epoch, name='average')\n load_params(netG, backup_para)\n #\n # self.save_img_results(netG, fixed_noise, sent_emb,\n # words_embs, mask, image_encoder,\n # captions, cap_lens,\n # epoch, name='current')\n end_t = time.time()\n\n print('''[%d/%d][%d]\n Loss_D: %.2f Loss_G: %.2f Time: %.2fs'''\n % (epoch, self.max_epoch, self.num_batches,\n errD_total.data[0], errG_total.data[0],\n end_t - start_t))\n\n if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: # and epoch != 0:\n self.save_model(netG, avg_param_G, netsD, epoch)\n\n self.save_model(netG, avg_param_G, netsD, self.max_epoch)\n\n def 
save_singleimages(self, images, filenames, save_dir,\n split_dir, sentenceID=0):\n for i in range(images.size(0)):\n s_tmp = '%s/single_samples/%s/%s' %\\\n (save_dir, split_dir, filenames[i])\n folder = s_tmp[:s_tmp.rfind('/')]\n if not os.path.isdir(folder):\n print('Make a new folder: ', folder)\n mkdir_p(folder)\n\n fullpath = '%s_%d.jpg' % (s_tmp, sentenceID)\n # range from [-1, 1] to [0, 1]\n # img = (images[i] + 1.0) / 2\n img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()\n # range from [0, 1] to [0, 255]\n ndarr = img.permute(1, 2, 0).data.cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(fullpath)\n\n def sampling(self, split_dir):\n if cfg.TRAIN.NET_G == '':\n print('Error: the path for morels is not found!')\n else:\n if split_dir == 'test':\n split_dir = 'valid'\n # Build and load the generator\n if cfg.GAN.B_DCGAN:\n netG = G_DCGAN()\n else:\n netG = G_NET()\n netG.apply(weights_init)\n netG.cuda()\n netG.eval()\n #\n text_encoder = RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)\n state_dict = \\\n torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)\n text_encoder.load_state_dict(state_dict)\n print('Load text encoder from:', cfg.TRAIN.NET_E)\n text_encoder = text_encoder.cuda()\n text_encoder.eval()\n\n batch_size = self.batch_size\n nz = cfg.GAN.Z_DIM\n noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)\n noise = noise.cuda()\n\n model_dir = cfg.TRAIN.NET_G\n state_dict = \\\n torch.load(model_dir, map_location=lambda storage, loc: storage)\n # state_dict = torch.load(cfg.TRAIN.NET_G)\n netG.load_state_dict(state_dict)\n print('Load G from: ', model_dir)\n\n # the path to save generated images\n s_tmp = model_dir[:model_dir.rfind('.pth')]\n save_dir = '%s/%s' % (s_tmp, split_dir)\n mkdir_p(save_dir)\n\n cnt = 0\n\n for _ in range(1): # (cfg.TEXT.CAPTIONS_PER_IMAGE):\n for step, data in enumerate(self.data_loader, 0):\n cnt += batch_size\n if step % 100 == 0:\n print('step: ', step)\n # if step > 50:\n # break\n\n imgs, captions, cap_lens, class_ids, keys = prepare_data(data)\n\n hidden = text_encoder.init_hidden(batch_size)\n # words_embs: batch_size x nef x seq_len\n # sent_emb: batch_size x nef\n words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)\n words_embs, sent_emb = words_embs.detach(), sent_emb.detach()\n mask = (captions == 0)\n num_words = words_embs.size(2)\n if mask.size(1) > num_words:\n mask = mask[:, :num_words]\n\n #######################################################\n # (2) Generate fake images\n ######################################################\n noise.data.normal_(0, 1)\n fake_imgs, _, _, _ = netG(noise, sent_emb, words_embs, mask)\n for j in range(batch_size):\n s_tmp = '%s/single/%s' % (save_dir, keys[j])\n folder = s_tmp[:s_tmp.rfind('/')]\n if not os.path.isdir(folder):\n print('Make a new folder: ', folder)\n mkdir_p(folder)\n k = -1\n # for k in range(len(fake_imgs)):\n im = fake_imgs[k][j].data.cpu().numpy()\n # [-1, 1] --> [0, 255]\n im = (im + 1.0) * 127.5\n im = im.astype(np.uint8)\n im = np.transpose(im, (1, 2, 0))\n im = Image.fromarray(im)\n fullpath = '%s_s%d.png' % (s_tmp, k)\n im.save(fullpath)\n\n def gen_example(self, data_dic, noises=None):\n if cfg.TRAIN.NET_G == '':\n print('Error: the path for morels is not found!')\n else:\n # Build and load the generator\n text_encoder = \\\n RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)\n state_dict = \\\n torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)\n 
text_encoder.load_state_dict(state_dict)\n print('Load text encoder from:', cfg.TRAIN.NET_E)\n text_encoder = text_encoder.cuda()\n text_encoder.eval()\n\n # the path to save generated images\n if cfg.GAN.B_DCGAN:\n netG = G_DCGAN()\n else:\n netG = G_NET()\n s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]\n model_dir = cfg.TRAIN.NET_G\n state_dict = \\\n torch.load(model_dir, map_location=lambda storage, loc: storage)\n netG.load_state_dict(state_dict)\n print('Load G from: ', model_dir)\n netG.cuda()\n netG.eval()\n for key in data_dic:\n save_dir = '%s/%s' % (s_tmp, key)\n mkdir_p(save_dir)\n captions, cap_lens, sorted_indices = data_dic[key]\n\n batch_size = captions.shape[0]\n nz = cfg.GAN.Z_DIM\n captions = Variable(torch.from_numpy(captions), volatile=True)\n cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)\n\n captions = captions.cuda()\n cap_lens = cap_lens.cuda()\n for i in range(1): # 16\n if noises is None:\n #######################################################\n # (1) Extract text embeddings\n ######################################################\n hidden = text_encoder.init_hidden(batch_size)\n # words_embs: batch_size x nef x seq_len\n # sent_emb: batch_size x nef\n words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)\n mask = (captions == 0)\n #######################################################\n # (2) Generate fake images\n ######################################################\n \n noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)\n noise = noise.cuda()\n noise.data.normal_(0, 1)\n fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)\n # G attention\n cap_lens_np = cap_lens.cpu().data.numpy()\n for j in range(batch_size):\n save_name = '%s/%d_s_%d' % (save_dir, i, sorted_indices[j])\n for k in range(len(fake_imgs)):\n im = fake_imgs[k][j].data.cpu().numpy()\n im = (im + 1.0) * 127.5\n im = im.astype(np.uint8)\n # print('im', im.shape)\n im = np.transpose(im, (1, 2, 0))\n # print('im', im.shape)\n im = Image.fromarray(im)\n fullpath = '%s_g%d.png' % (save_name, k)\n im.save(fullpath)\n\n for k in range(len(attention_maps)):\n if len(fake_imgs) > 1:\n im = fake_imgs[k + 1].detach().cpu()\n else:\n im = fake_imgs[0].detach().cpu()\n attn_maps = attention_maps[k]\n att_sze = attn_maps.size(2)\n img_set, sentences = \\\n build_super_images2(im[j].unsqueeze(0),\n captions[j].unsqueeze(0),\n [cap_lens_np[j]], self.ixtoword,\n [attn_maps[j]], att_sze)\n if img_set is not None:\n im = Image.fromarray(img_set)\n fullpath = '%s_a%d.png' % (save_name, k)\n im.save(fullpath)\n else:\n #######################################################\n # (1) Extract text embeddings\n ######################################################\n hidden = text_encoder.init_hidden(batch_size)\n # words_embs: batch_size x nef x seq_len\n # sent_emb: batch_size x nef\n words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)\n mask = (captions == 0)\n #######################################################\n # (2) Generate fake images\n ######################################################\n for zi, noise in enumerate(noises):\n noise = np.repeat([noise], batch_size, axis=0)\n noise = Variable(torch.from_numpy(noise), volatile=True)\n noise = noise.cuda()\n fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)\n # G attention\n cap_lens_np = cap_lens.cpu().data.numpy()\n for j in range(batch_size):\n save_name = '%s/%d_s_%d' % (save_dir, i, sorted_indices[j])\n for k in range(len(fake_imgs)):\n im = 
fake_imgs[k][j].data.cpu().numpy()\n im = (im + 1.0) * 127.5\n im = im.astype(np.uint8)\n # print('im', im.shape)\n im = np.transpose(im, (1, 2, 0))\n # print('im', im.shape)\n im = Image.fromarray(im)\n fullpath = '%s_g%d_z%d.png' % (save_name, k, zi)\n im.save(fullpath)\n\n for k in range(len(attention_maps)):\n if len(fake_imgs) > 1:\n im = fake_imgs[k + 1].detach().cpu()\n else:\n im = fake_imgs[0].detach().cpu()\n attn_maps = attention_maps[k]\n att_sze = attn_maps.size(2)\n img_set, sentences = \\\n build_super_images2(im[j].unsqueeze(0),\n captions[j].unsqueeze(0),\n [cap_lens_np[j]], self.ixtoword,\n [attn_maps[j]], att_sze)\n if img_set is not None:\n im = Image.fromarray(img_set)\n fullpath = '%s_a%d_z%d.png' % (save_name, k, zi)\n im.save(fullpath)\n" ]
[ [ "torch.cuda.set_device", "torch.load", "torch.from_numpy", "torch.FloatTensor", "numpy.transpose", "numpy.repeat" ] ]
vibhoothi/awcy
[ "172edeeca8c331d63ee72f6a3928fc6b2965d845" ]
[ "dump_convex_hull.py" ]
[ "#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nfrom numpy import *\nimport numpy as np\nfrom scipy import *\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import pchip\nfrom scipy.interpolate import BPoly\nfrom scipy._lib._util import _asarray_validated\nimport sys\nimport os\nimport argparse\nimport json\nimport subprocess\nimport xlrd\nimport re\n\nparser = argparse.ArgumentParser(description='Dump convex hull')\nparser.add_argument('xls',nargs=1,help='xls file to dump')\nargs = parser.parse_args()\n\nmet_index_as = {\"PSNR Y (libvmaf)\": 11, \"PSNR Cb (libvmaf)\": 18, \"PSNR Cr (libvmaf)\": 25,\n \"CIEDE2000 (libvmaf)\": 74, \"SSIM (libvmaf)\": 39, \"MS-SSIM (libvmaf)\": 46,\n \"PSNR-HVS (libvmaf)\": 67, \"VMAF\": 53, \"VMAF-NEG\": 60}\n\nresolutions = ['3840x2160', '2560x1440', '1920x1080', '1280x720', '960x540', '640x360']\n\nerror_strings = []\n\ndef dump_as(file1):\n ret = {}\n a_xls = xlrd.open_workbook(file1)\n a_sh = a_xls.sheet_by_index(0)\n for metric in met_index_as:\n if metric not in met_index_as:\n return\n ra = []\n ya = []\n for c in range(1,a_sh.ncols):\n y = a_sh.cell_value(colx=c, rowx=met_index_as[metric] - 1 + 4)\n if (y == ''):\n continue\n ya.append(y)\n ra.append(a_sh.cell_value(colx=c, rowx=met_index_as[metric] - 1 + 5))\n ra = np.flipud(ra)\n ya = np.flipud(ya)\n ret[metric] = {\"Bitrate\": ra.tolist(), \"Metric\": ya.tolist()}\n return ret\n\n# generate xls for each of the two runs:\n#or run in args.run:\n# subprocess.run(['python3', 'convexhull_framework/src/AWCYConvexHullTest.py', run], check=True)\n\nret = dump_as(args.xls[0])\nprint(json.dumps(ret))\n" ]
[ [ "numpy.flipud" ] ]
RichardJ112/nebula_dynamic_2021
[ "6e120f4562e46cd02d981e75aa4238d16ee0e9c8" ]
[ "nebula_test_files/vox_tri_pri/gen_pri_wall.py" ]
[ "import numpy as np\nimport os\n\n# Point exposure for Wall Depositions\n\n#Automation Additions\nHPC_toggle = False\nmulti_run_folder_toggle = False# single or multi folder (True is multi,False is single)\ndesktop_toggle = True\n\ndate = \"/20_8_2021/\"\n\n# Parameters Geom\nvoxel_size = 0.3; # voxel size in nanometers (0.27 nm is appr. one atom of Si)(Smith uses 0.25nm for Tungsten(W))\nsize_x= 401 # horizontal size in the x direction in voxels (now for +/- x)\nsize_y = 401 # horizontal size in the y direction in voxels (now for +/- y)\nsize_z = 1001 # vertical size in voxels\nvolume = size_x*size_y*size_z # total voxel volume\nvoxel_size_pm = int(voxel_size*1000)\n\n# Parameters Pri\nnum_x = int(11) # number of pillars in the x direction\nnum_y = int(21) #number of pillars in the y direction\nnum_x_range = np.linspace(-(num_x-1)/2,(num_x-1)/2,num_x)\nnum_y_range = np.linspace(-(num_y-1)/2,(num_y-1)/2,num_y)\nx = size_x*voxel_size/2 # starting x\ny = size_y*voxel_size/2 # starting y\nz = 1 # starting z in nm\nN = int(1e3) # Number of electrons per pillar\nseq_lines = 100\nenergy = 1000 # Beam energy, in eV\nsigma_beam = 1 # Beam standard deviation in nm\nsigma_beam_pm = round(sigma_beam*1000) # Beam standard deviation in pm\ntot_e = int(num_x*num_y*N)\n\n\n#line_pitch_x_list = [6,6.5,7] \n#line_pitch_x_list = [3,3.5,4] \nline_pitch_x_list = [4.5] \n#line_pitch_x_list = np.around(np.linspace(3,4,num=10),decimals=1) \n\n#line_pitch_y_list = [1.7,1.7,1.7] #space between points nm\nline_pitch_y_list = np.ones(len(line_pitch_x_list))*1.7\n\n#line_pitch_list = np.around(np.linspace(1,5,num=20),decimals=1) #space between points nm\ndep_strat = \"seq_ll\" #seq_ll (sequential layered lines), seq_d (sequential diagonals) \n\n# This is a numpy datatype that corresponds to pri files\ndt = np.dtype([\n\t('x', np.float32), ('y', np.float32), ('z', np.float32), # Starting position\n\t('dx', np.float32), ('dy', np.float32), ('dz', np.float32), # Starting direction\n\t('K', np.float32), # Starting energy\n\t('px', np.uint32), ('py', np.uint32)]) # Pixel index\n\ntotal_runs = len(line_pitch_y_list)\n# For now only iterates over line_pitches --> may update to include any possible combination of parameters \n\n#Folder Location\nif multi_run_folder_toggle:\n #Multi-Runs - Run all files in folder (for multi-runs exectuable)\n if HPC_toggle:\n file_path = \"/home/richarddejong/nebula_test_files/vox_tri_pri/multi_runs\"+date \n elif desktop_toggle:\n file_path = \"C:/Users/Richard/source/repos/Nebula/nebula_test_files/vox_tri_pri/multi_runs\"+date\n else:\n file_path = \"C:/Users/richa/Documents/repos/nebula_test_files/vox_tri_pri/multi_runs\"+date\nelse:\n #Single Runs - Run a single file (for single-run executable)\n if HPC_toggle:\n file_path = \"/home/richarddejong/nebula_test_files/vox_tri_pri/single_runs\"+date \n elif desktop_toggle:\n file_path = \"C:/Users/Richard/source/repos/Nebula/nebula_test_files/vox_tri_pri/single_runs\"+date\n else:\n file_path = \"C:/Users/richa/Documents/repos/nebula_test_files/vox_tri_pri/single_runs\"+date\n\nif dep_strat == \"seq_d\":\n for i in range(total_runs):\n # Update Iterative Parameters\n line_pitch_x = line_pitch_x_list[i]\n line_pitch_x_pm = int(line_pitch_x*1000)\n line_pitch_y = line_pitch_y_list[i]\n line_pitch_y_pm = int(line_pitch_y*1000) \n\n #title creation\n title_pri = str(int(energy/1000))+\"keV_\"+str(num_x)+\"_\"+str(num_y)+\"_\"+str(int(N/1000))+\"kpp_pitchx_\"+str(line_pitch_x_pm)+\"_pitchy_\"+str(line_pitch_y_pm)+\"_\"+dep_strat+\"_\"\n 
title_geom = str(size_x)+\"_\"+str(size_y)+\"_\"+str(size_z)+\"_sb_\"+str(sigma_beam_pm)+\"_vs_\"+str(voxel_size_pm)\n title = title_pri+title_geom+\".pri\"\n \n x_p = []\n y_p = []\n inputx = np.zeros(tot_e)\n inputy = np.zeros(tot_e)\n\n for k in num_x_range:\n for j in num_y_range:\n startx = k*line_pitch_x+x\n starty = j*line_pitch_y+y\n xj = np.random.normal(startx, sigma_beam, N)\n yj = np.random.normal(starty, sigma_beam, N)\n x_p.append(xj)\n y_p.append(yj)\n\n for j in range(num_y*num_x):\n inputx[j*N:N*(j+1)] = x_p[j]\n inputy[j*N:N*(j+1)] = y_p[j]\n\n # Open file\n\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n with open(file_path+title, 'wb') as file:\n # Allocate numpy buffer\n array = np.empty(tot_e, dtype=dt)\n\n # Fill with data\n array['x'] = inputx\n array['y'] = inputy\n array['z'] = z\n array['dx'] = 0\n array['dy'] = 0\n array['dz'] = 1\n array['K'] = energy\n array['px'] = 0\n array['py'] = 0\n\n # Write buffer to file\n array.tofile(file)\n\n #Progress Tracker\n if i%10 == 0:\n print(\"Creating Primary Files :\" + str(round(i/total_runs*100)) + \"%\")\n\nif dep_strat == \"seq_ll\":\n for i in range(total_runs):\n # Update Iterative Parameters\n line_pitch_x = line_pitch_x_list[i]\n line_pitch_x_pm = int(line_pitch_x*1000)\n line_pitch_y = line_pitch_y_list[i]\n line_pitch_y_pm = int(line_pitch_y*1000) \n\n #title creation\n title_pri = str(int(energy/1000))+\"keV_\"+str(num_x)+\"_\"+str(num_y)+\"_\"+str(int(N/1000))+\"kpp_pitchx_\"+str(line_pitch_x_pm)+\"_pitchy_\"+str(line_pitch_y_pm)+\"_\"+dep_strat+\"_\"\n title_geom = str(size_x)+\"_\"+str(size_y)+\"_\"+str(size_z)+\"_sb_\"+str(sigma_beam_pm)+\"_vs_\"+str(voxel_size_pm)\n title = title_pri+title_geom+\".pri\"\n\n x_p = []\n y_p = []\n inputx = np.zeros(tot_e)\n inputy = np.zeros(tot_e)\n N_seq = int(N/seq_lines)\n num_line = num_y*seq_lines\n num_sum = 0\n for k in num_x_range: #We switch x and y\n starty = k*line_pitch_y+y\n for i in range(seq_lines): \n for j in num_y_range:\n startx = j*line_pitch_x+x\n xj = np.random.normal(startx, sigma_beam, N_seq)\n yj = np.random.normal(starty, sigma_beam, N_seq)\n x_p.append(xj)\n y_p.append(yj)\n for j in range(num_line):\n inputx[(j+num_sum)*N_seq:N_seq*(j+1+num_sum)] = x_p[j+num_sum]\n inputy[(j+num_sum)*N_seq:N_seq*(j+1+num_sum)] = y_p[j+num_sum]\n num_sum+=num_line\n # Open file\n\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n with open(file_path+title, 'wb') as file:\n # Allocate numpy buffer\n array = np.empty(num_y*N*num_x, dtype=dt)\n\n # Fill with data\n array['x'] = inputx\n array['y'] = inputy\n array['z'] = z\n array['dx'] = 0\n array['dy'] = 0\n array['dz'] = 1\n array['K'] = energy\n array['px'] = 0\n array['py'] = 0\n\n # Write buffer to file\n array.tofile(file)\n\n #Progress Tracker\n if i%10 == 0:\n print(\"Creating Primary Files :\" + str(round(i/total_runs*100)) + \"%\")\n" ]
[ [ "numpy.linspace", "numpy.dtype", "numpy.random.normal", "numpy.zeros", "numpy.empty" ] ]
mcx/tensorflow
[ "d7e521a1ad21681855b439b9c2a05837c804e488" ]
[ "tensorflow/python/distribute/sharded_variable_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ShardedVariable.\"\"\"\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python.checkpoint import checkpoint as util\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.compat import v2_compat\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import parameter_server_strategy_v2\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.test_util import get_cluster_def\nfrom tensorflow.python.distribute.test_util import TestClusterParams\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import loader\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.trackable import autotrackable\nfrom tensorflow.python.training.server_lib import ClusterSpec\nfrom tensorflow.python.util import nest\n\n# We create one cluster to share between tests. The cluster should be large\n# enough to accommodate all the tests. 
Adjust the following constants as needed\n# but be aware of resource limitations in OSS tests.\ntest_cluster_params = TestClusterParams(None, 2, 3)\n\n\ndef _load_and_run(\n model_dir,\n inputs,\n signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):\n \"\"\"Load a SavedModel into a TF 1.x-style graph and run `signature_key`.\"\"\"\n graph = ops.Graph()\n with graph.as_default(), session_lib.Session() as session:\n meta_graph_def = loader.load(session, [tag_constants.SERVING], model_dir)\n signature = meta_graph_def.signature_def[signature_key]\n feed_dict = {}\n for arg_name in inputs.keys():\n input_tensor = session.graph.get_tensor_by_name(\n signature.inputs[arg_name].name)\n feed_dict[input_tensor] = inputs[arg_name]\n output_dict = {}\n for output_name, output_tensor_info in signature.outputs.items():\n output_dict[output_name] = session.graph.get_tensor_by_name(\n output_tensor_info.name)\n return session.run(output_dict, feed_dict=feed_dict)\n\n\nclass PartitionerTest(test.TestCase):\n\n def test_fixed_shards_partitioner(self):\n partitioner = sharded_variable.FixedShardsPartitioner(num_shards=2)\n got = partitioner(tensor_shape.TensorShape([10, 3]), dtypes.float32)\n self.assertAllEqual(got, [2, 1])\n\n def test_min_size_partitioner(self):\n partitioner = sharded_variable.MinSizePartitioner(\n min_shard_bytes=4, max_shards=2)\n got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)\n self.assertAllEqual(got, [2, 1])\n\n partitioner = sharded_variable.MinSizePartitioner(\n min_shard_bytes=4, max_shards=10)\n got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)\n self.assertAllEqual(got, [6, 1])\n\n def test_max_size_partitioner(self):\n partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=4)\n got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)\n self.assertAllEqual(got, [6, 1])\n\n partitioner = sharded_variable.MaxSizePartitioner(\n max_shard_bytes=4, max_shards=2)\n got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)\n self.assertAllEqual(got, [2, 1])\n\n partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=1024)\n got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)\n self.assertAllEqual(got, [1, 1])\n\n\nclass ShardedVariableTest(test.TestCase, parameterized.TestCase):\n\n def test_sharded_variable_simple(self):\n v0 = variables_lib.Variable([0])\n v1 = variables_lib.Variable([1])\n s = sharded_variable.ShardedVariable([v0, v1], name='s')\n self.assertEqual(s.variables[0], v0)\n self.assertEqual(s.variables[1], v1)\n self.assertEqual(s.shape.as_list(), [2])\n self.assertEqual(s.dtype, v0.dtype)\n self.assertEqual(s.name, 's')\n\n def test_assign(self):\n v0 = variables_lib.Variable([[0, 0]])\n v1 = variables_lib.Variable([[1, 1], [2, 2]])\n v2 = variables_lib.Variable([[3, 3]])\n s = sharded_variable.ShardedVariable([v0, v1, v2])\n ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])\n self.assertAllEqual(self.evaluate(s.variables[0]), [[4, 4]])\n self.assertAllEqual(self.evaluate(s.variables[1]), [[5, 5], [6, 6]])\n self.assertAllEqual(self.evaluate(s.variables[2]), [[7, 7]])\n self.assertIs(ret, s)\n\n def test_assign_add(self):\n v0 = variables_lib.Variable([[0, 0]])\n v1 = variables_lib.Variable([[1, 1], [2, 2]])\n v2 = variables_lib.Variable([[3, 3]])\n s = sharded_variable.ShardedVariable([v0, v1, v2])\n ret = s.assign_add([[1, 1], [1, 1], [2, 2], [2, 2]])\n self.assertAllEqual(self.evaluate(s.variables[0]), [[1, 1]])\n 
self.assertAllEqual(self.evaluate(s.variables[1]), [[2, 2], [4, 4]])\n self.assertAllEqual(self.evaluate(s.variables[2]), [[5, 5]])\n self.assertIs(ret, s)\n\n def test_assign_sub(self):\n v0 = variables_lib.Variable([[0, 0]])\n v1 = variables_lib.Variable([[1, 1], [2, 2]])\n v2 = variables_lib.Variable([[3, 3]])\n s = sharded_variable.ShardedVariable([v0, v1, v2])\n ret = s.assign_sub([[0, 0], [1, 1], [1, 1], [3, 3]])\n self.assertAllEqual(self.evaluate(s.variables[0]), [[0, 0]])\n self.assertAllEqual(self.evaluate(s.variables[1]), [[0, 0], [1, 1]])\n self.assertAllEqual(self.evaluate(s.variables[2]), [[0, 0]])\n self.assertIs(ret, s)\n\n def test_scatter_add_uneven_partition(self):\n v = variables_lib.Variable(array_ops.zeros((32, 1)))\n sparse_delta = indexed_slices.IndexedSlices(\n values=constant_op.constant([[0.], [1.], [2.], [3.], [4.], [5.]]),\n indices=constant_op.constant([0, 10, 11, 12, 30, 31]))\n\n v0 = variables_lib.Variable(array_ops.zeros((11, 1)))\n v1 = variables_lib.Variable(array_ops.zeros((11, 1)))\n v2 = variables_lib.Variable(array_ops.zeros((10, 1)))\n sv = sharded_variable.ShardedVariable([v0, v1, v2])\n\n v.scatter_add(sparse_delta)\n sv.scatter_add(sparse_delta)\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n @def_function.function\n def func():\n v.scatter_add(sparse_delta)\n sv.scatter_add(sparse_delta)\n\n func()\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n @parameterized.parameters('scatter_add', 'scatter_div', 'scatter_max',\n 'scatter_min', 'scatter_mul', 'scatter_sub',\n 'scatter_update')\n def test_scatter_ops_even_partition(self, op):\n v = variables_lib.Variable(array_ops.zeros((30, 1)))\n # Make sure values does not contain 0 due to testing `scatter_div`!\n sparse_delta = indexed_slices.IndexedSlices(\n values=constant_op.constant([[1.], [2.], [3.], [4.], [5.]]),\n indices=constant_op.constant([0, 10, 12, 21, 22]))\n\n v0 = variables_lib.Variable(array_ops.zeros((10, 1)))\n v1 = variables_lib.Variable(array_ops.zeros((10, 1)))\n v2 = variables_lib.Variable(array_ops.zeros((10, 1)))\n sv = sharded_variable.ShardedVariable([v0, v1, v2])\n\n getattr(v, op)(sparse_delta, name='scatter_v')\n getattr(sv, op)(sparse_delta, name='scatter_sv')\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n @def_function.function\n def func():\n getattr(v, op)(sparse_delta, name='scatter_v')\n getattr(sv, op)(sparse_delta, name='scatter_sv')\n\n func()\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n def test_batch_scatter_update(self):\n v = variables_lib.Variable(array_ops.zeros((32, 1)))\n sparse_delta = indexed_slices.IndexedSlices(\n values=constant_op.constant([[0.], [1.], [2.], [3.], [4.], [5.]]),\n indices=constant_op.constant([10, 11, 12, 13, 14, 15]))\n\n v0 = variables_lib.Variable(array_ops.zeros((11, 1)))\n v1 = variables_lib.Variable(array_ops.zeros((11, 1)))\n v2 = variables_lib.Variable(array_ops.zeros((10, 1)))\n sv = sharded_variable.ShardedVariable([v0, v1, v2])\n\n v.batch_scatter_update(sparse_delta)\n sv.batch_scatter_update(sparse_delta)\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n @def_function.function\n def func():\n v.batch_scatter_update(sparse_delta)\n sv.batch_scatter_update(sparse_delta)\n\n func()\n self.assertAllEqual(v, ops.convert_to_tensor(sv))\n\n def test_sparse_read(self):\n v = variables_lib.Variable(array_ops.zeros((30, 1)))\n indices = constant_op.constant([0, 10, 12, 21, 22])\n\n v0 = variables_lib.Variable(array_ops.zeros((10, 1)))\n v1 = variables_lib.Variable(array_ops.zeros((10, 1)))\n 
v2 = variables_lib.Variable(array_ops.zeros((10, 1)))\n sv = sharded_variable.ShardedVariable([v0, v1, v2])\n\n self.assertAllEqual(v.sparse_read(indices), sv.sparse_read(indices))\n\n @def_function.function\n def func():\n return v.sparse_read(indices), sv.sparse_read(indices)\n\n got, expect = func()\n self.assertAllEqual(got, expect)\n\n def test_control_dep_on_assign(self):\n v0 = variables_lib.Variable([[0, 0]])\n v1 = variables_lib.Variable([[1, 1], [2, 2]])\n v2 = variables_lib.Variable([[3, 3]])\n s = sharded_variable.ShardedVariable([v0, v1, v2])\n\n @def_function.function\n def func():\n ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])\n with ops.control_dependencies([ret]):\n a = array_ops.ones((1, 1))\n with ops.control_dependencies([control_flow_ops.group(ret)]):\n b = array_ops.ones((1, 1))\n return a, b\n\n func()\n\n def test_convert_to_tensor(self):\n v0 = variables_lib.Variable([[0, 0]])\n v1 = variables_lib.Variable([[1, 1], [2, 2]])\n v2 = variables_lib.Variable([[3, 3]])\n s = sharded_variable.ShardedVariable([v0, v1, v2])\n t = ops.convert_to_tensor(s)\n self.assertAllEqual(t, [[0, 0], [1, 1], [2, 2], [3, 3]])\n\n def test_save_restore(self):\n fname = os.path.join(self.get_temp_dir(), 'checkpoint')\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n variables_lib.Variable([2]),\n variables_lib.Variable([3])\n ]\n s = sharded_variable.ShardedVariable(variables, name='s')\n\n cp = util.Checkpoint(s=s)\n self.assertEqual(self.evaluate(cp.s.variables[0]), [0])\n cp.write(fname)\n\n self.evaluate(cp.s.variables[0].assign([4]))\n self.assertEqual(self.evaluate(cp.s.variables[0]), [4])\n\n cp.restore(fname)\n # Tests that the original weights are restored.\n self.assertEqual(self.evaluate(cp.s.variables[0]), [0])\n\n def test_save_restore_different_partitions(self):\n fname = os.path.join(self.get_temp_dir(), 'checkpoint')\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n variables_lib.Variable([2]),\n variables_lib.Variable([3])\n ]\n s = sharded_variable.ShardedVariable(variables, name='s')\n\n cp = util.Checkpoint(s=s)\n cp.write(fname)\n\n variables2 = [variables_lib.Variable([0, 0, 0, 0])]\n s2 = sharded_variable.ShardedVariable(variables2, name='s')\n\n # Restore from 4 partitions into 1.\n cp2 = util.Checkpoint(s=s2)\n cp2.restore(fname)\n self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1, 2, 3])\n\n self.evaluate(cp2.s.variables[0].assign([5, 10, 15, 20]))\n cp2.write(fname)\n\n # Restore 1 partition into 4.\n cp.restore(fname)\n self.assertEqual(self.evaluate(cp.s.variables[0]), [5])\n self.assertEqual(self.evaluate(cp.s.variables[1]), [10])\n self.assertEqual(self.evaluate(cp.s.variables[2]), [15])\n self.assertEqual(self.evaluate(cp.s.variables[3]), [20])\n\n def test_save_restore_4_to_2_partitions(self):\n fname = os.path.join(self.get_temp_dir(), 'checkpoint')\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n variables_lib.Variable([2]),\n variables_lib.Variable([3])\n ]\n s = sharded_variable.ShardedVariable(variables, name='s')\n cp = util.Checkpoint(s=s)\n cp.write(fname)\n\n variables2 = [\n variables_lib.Variable([0, 0]),\n variables_lib.Variable([0, 0])\n ]\n s2 = sharded_variable.ShardedVariable(variables2, name='s')\n cp2 = util.Checkpoint(s=s2)\n cp2.restore(fname)\n # Assert that weights from the 4 partitions were loaded here.\n self.assertLen(cp2.s.variables, 2)\n self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1])\n 
self.assertAllEqual(self.evaluate(cp2.s.variables[1]), [2, 3])\n\n def test_delayed_restore(self):\n fname = os.path.join(self.get_temp_dir(), 'checkpoint')\n model = autotrackable.AutoTrackable()\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n variables_lib.Variable([2]),\n variables_lib.Variable([3])\n ]\n model.s = sharded_variable.ShardedVariable(variables)\n cp = util.Checkpoint(model=model)\n cp.write(fname)\n\n model2 = autotrackable.AutoTrackable()\n cp2 = util.Checkpoint(model=model2)\n cp2.restore(fname)\n variables2 = [\n variables_lib.Variable([0]),\n variables_lib.Variable([0]),\n variables_lib.Variable([0]),\n variables_lib.Variable([0])\n ]\n model2.s = sharded_variable.ShardedVariable(variables2)\n self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0])\n self.assertAllEqual(self.evaluate(model2.s.variables[1]), [1])\n self.assertAllEqual(self.evaluate(model2.s.variables[2]), [2])\n self.assertAllEqual(self.evaluate(model2.s.variables[3]), [3])\n\n def test_delayed_restore_4_to_2_partitions(self):\n fname = os.path.join(self.get_temp_dir(), 'checkpoint')\n model = autotrackable.AutoTrackable()\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n variables_lib.Variable([2]),\n variables_lib.Variable([3])\n ]\n model.s = sharded_variable.ShardedVariable(variables)\n cp = util.Checkpoint(model=model)\n cp.write(fname)\n\n model2 = autotrackable.AutoTrackable()\n cp2 = util.Checkpoint(model=model2)\n cp2.restore(fname)\n variables2 = [\n variables_lib.Variable([0, 0]),\n variables_lib.Variable([0, 0])\n ]\n model2.s = sharded_variable.ShardedVariable(variables2)\n self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0, 1])\n self.assertAllEqual(self.evaluate(model2.s.variables[1]), [2, 3])\n\n def test_save_graph_def(self):\n root = autotrackable.AutoTrackable()\n v1 = variables_lib.Variable([3.])\n v2 = variables_lib.Variable([2.])\n root.v = sharded_variable.ShardedVariable([v1, v2])\n root.train = def_function.function(\n lambda x: embedding_ops.embedding_lookup_v2(root.v.variables, x))\n # TODO(b/144057383): Remove the necessity of root.serve once saving context\n # is made to tf.function cache.\n root.serve = def_function.function(\n lambda x: embedding_ops.embedding_lookup_v2(root.v.variables[0], x),\n input_signature=[tensor_spec.TensorSpec([2], dtypes.int32, name='x')])\n\n # Trace and use root.train\n self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save.save(root, save_dir, root.serve)\n self.assertAllEqual([3., 2.],\n _load_and_run(save_dir, {'x': [0, 1]})['output_0'])\n\n # Continue using root.train for training\n self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())\n\n def test_validation_errors(self):\n with self.assertRaisesRegex(TypeError, 'should be a non-empty list of'):\n sharded_variable.ShardedVariable(None)\n\n with self.assertRaisesRegex(TypeError, 'should be a non-empty list of'):\n sharded_variable.ShardedVariable(\n [variables_lib.Variable([0]), 'not-a-variable'])\n\n with self.assertRaisesRegex(TypeError, 'should be a non-empty list of'):\n sharded_variable.ShardedVariable([])\n\n with self.assertRaisesRegex(ValueError, 'must have the same dtype'):\n sharded_variable.ShardedVariable([\n variables_lib.Variable([0], dtype='int64'),\n variables_lib.Variable([1], dtype='int32')\n ])\n\n with self.assertRaisesRegex(ValueError, 'the same shapes except'):\n sharded_variable.ShardedVariable([\n 
variables_lib.Variable(array_ops.ones((5, 10))),\n variables_lib.Variable(array_ops.ones((5, 20)))\n ])\n\n with self.assertRaisesRegex(ValueError, '`SaveSliceInfo` should not'):\n v = variables_lib.Variable([0])\n v._set_save_slice_info(\n variables_lib.Variable.SaveSliceInfo(\n full_name='s', full_shape=[2], var_offset=[0], var_shape=[1]))\n sharded_variable.ShardedVariable([v])\n\n def test_as_function_input(self):\n variables1 = [\n variables_lib.Variable([1]),\n variables_lib.Variable([1]),\n ]\n s = sharded_variable.ShardedVariable(variables1)\n variables2 = [\n variables_lib.Variable([2]),\n variables_lib.Variable([2]),\n ]\n s2 = sharded_variable.ShardedVariable(variables2)\n\n trace_count = [0]\n\n @def_function.function\n def func(sharded_var):\n trace_count[0] = trace_count[0] + 1\n sharded_var.assign([0, 0])\n\n func(s)\n self.assertAllEqual(ops.convert_to_tensor(s), [0, 0])\n self.assertEqual(trace_count[0], 1)\n func(s2)\n self.assertAllEqual(ops.convert_to_tensor(s2), [0, 0])\n self.assertEqual(trace_count[0], 1)\n\n def test_flatten(self):\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n ]\n s = sharded_variable.ShardedVariable(variables)\n\n got = nest.flatten(s)\n self.assertIs(s, got[0])\n\n got = nest.flatten(s, expand_composites=True)\n self.assertAllEqual(variables, got)\n\n def test_tf_module(self):\n\n class Model(module.Module):\n\n def __init__(self):\n super().__init__()\n variables = [\n variables_lib.Variable([0]),\n variables_lib.Variable([1]),\n ]\n self.w = sharded_variable.ShardedVariable(variables)\n\n model = Model()\n\n self.assertLen(model.variables, 2)\n self.assertEqual(model.variables[0], [0])\n self.assertEqual(model.variables[1], [1])\n self.assertAllEqual(model.variables, model.trainable_variables)\n\n self.assertLen(model._trackable_children(), 1)\n self.assertIs(model._trackable_children().popitem()[1], model.w)\n\n def test_embedding_lookup(self):\n v = [\n variables_lib.Variable([[1., 2.], [3., 4.]]),\n variables_lib.Variable([[5., 6.], [7., 8.]]),\n variables_lib.Variable([[9., 10.]])\n ]\n sv = sharded_variable.ShardedVariable(v)\n\n @def_function.function\n def lookup():\n ids = constant_op.constant([0, 3, 4])\n return embedding_ops.embedding_lookup_v2(sv, ids)\n\n @def_function.function\n def sparse_lookup():\n sp_ids = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0], [2, 2]],\n values=[0, 3, 4, 1],\n dense_shape=[3, 3])\n return embedding_ops.embedding_lookup_sparse_v2(sv, sp_ids, None)\n\n @def_function.function\n def safe_sparse_lookup():\n sp_ids = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0], [2, 2]],\n values=[0, -1, 4, 1],\n dense_shape=[3, 3])\n sp_weights = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0], [2, 2]],\n values=[1., 1., -1., 1.],\n dense_shape=[3, 3])\n return embedding_ops.safe_embedding_lookup_sparse_v2(\n sv, sp_ids, sp_weights)\n\n # TODO(chenkai): Add safe_sparse_lookup to the list. 
Currently\n # ShardedVariable is converted to a tensor in safe_sparse_lookup.\n for func in [lookup, sparse_lookup]:\n num_gather_ops = 0\n for op in func.get_concrete_function().graph.get_operations():\n if op.type == 'ResourceGather':\n num_gather_ops += 1\n self.assertEqual(\n num_gather_ops, len(v), 'Number of ResourceGather op does not match'\n ' expected, possibly due to ShardedVariable accidentally being'\n ' converted to tensor in embedding_lookup ops.')\n\n self.assertAllEqual(lookup(), [[1., 2.], [7., 8.], [9., 10.]])\n self.assertAllClose(sparse_lookup(), [[4., 5.], [9., 10.], [3., 4.]])\n self.assertAllClose(safe_sparse_lookup(), [[1., 2.], [0., 0.], [3., 4.]])\n\n def test_slicing(self):\n v = [\n variables_lib.Variable([[1, 2], [3, 4], [5, 6]]),\n variables_lib.Variable([[7, 8], [9, 10], [11, 12]]),\n variables_lib.Variable([[13, 14], [15, 16]])\n ]\n sv = sharded_variable.ShardedVariable(v)\n empty = v[0][0:0]\n\n # Test cases: positive step\n self.assertAllEqual(sv[:], array_ops.concat(v, axis=0))\n self.assertAllEqual(sv[:2], [[1, 2], [3, 4]])\n self.assertAllEqual(sv[-8:2], [[1, 2], [3, 4]])\n self.assertAllEqual(sv[-10:2], [[1, 2], [3, 4]])\n self.assertAllEqual(sv[5:], [[11, 12], [13, 14], [15, 16]])\n self.assertAllEqual(sv[5:-1], [[11, 12], [13, 14]])\n self.assertAllEqual(sv[::3], [[1, 2], [7, 8], [13, 14]])\n self.assertAllEqual(sv[::5], [[1, 2], [11, 12]])\n self.assertAllEqual(sv[1::6], [[3, 4], [15, 16]])\n self.assertAllEqual(sv[1:5:6], [[3, 4]])\n self.assertAllEqual(sv[1::7], [[3, 4]])\n self.assertAllEqual(sv[2:7], [[5, 6], [7, 8], [9, 10], [11, 12], [13, 14]])\n self.assertAllEqual(sv[2:7:2], [[5, 6], [9, 10], [13, 14]])\n self.assertAllEqual(sv[2:7:3], [[5, 6], [11, 12]])\n\n # Test cases: negative step\n self.assertAllEqual(\n sv[::-1], array_ops.reverse(array_ops.concat(v, axis=0), axis=[0]))\n self.assertAllEqual(sv[2::-1], [[5, 6], [3, 4], [1, 2]])\n self.assertAllEqual(sv[2:-8:-1], [[5, 6], [3, 4]])\n self.assertAllEqual(sv[2:-10:-1], [[5, 6], [3, 4], [1, 2]])\n self.assertAllEqual(sv[4::-1], [[9, 10], [7, 8], [5, 6], [3, 4], [1, 2]])\n self.assertAllEqual(sv[-1:-3:-1], [[15, 16], [13, 14]])\n self.assertAllEqual(sv[::-5], [[15, 16], [5, 6]])\n self.assertAllEqual(sv[6::-6], [[13, 14], [1, 2]])\n self.assertAllEqual(sv[6:5:-6], [[13, 14]])\n self.assertAllEqual(sv[6::-7], [[13, 14]])\n self.assertAllEqual(sv[7:1:-1],\n [[15, 16], [13, 14], [11, 12], [9, 10], [7, 8], [5, 6]])\n self.assertAllEqual(sv[7:1:-2], [[15, 16], [11, 12], [7, 8]])\n self.assertAllEqual(sv[7:1:-4], [[15, 16], [7, 8]])\n\n # Test cases: empty slice\n self.assertAllEqual(sv[0:0], empty)\n self.assertAllEqual(sv[5:3], empty)\n self.assertAllEqual(sv[3:5:-1], empty)\n self.assertAllEqual(sv[-1:0], empty)\n self.assertAllEqual(sv[2:-1:-1], empty)\n\n # Test cases: slicing other dimensions\n self.assertAllEqual(sv[:, 0], [1, 3, 5, 7, 9, 11, 13, 15])\n self.assertAllEqual(sv[:, 0:1], [[1], [3], [5], [7], [9], [11], [13], [15]])\n\n # Test cases: normal indexing\n self.assertAllEqual(sv[2], [5, 6])\n self.assertAllEqual(sv[6], [13, 14])\n self.assertAllEqual(sv[2, 1], 6)\n self.assertAllEqual(sv[-2], [13, 14])\n with self.assertRaisesRegex(IndexError, 'out of bounds'):\n _ = sv[100]\n with self.assertRaisesRegex(IndexError, 'out of bounds'):\n _ = sv[-100]\n\n # Test cases: Ellipsis\n self.assertAllEqual(sv[...], array_ops.concat(v, axis=0))\n self.assertAllEqual(sv[..., 0], [1, 3, 5, 7, 9, 11, 13, 15])\n self.assertAllEqual(sv[0:1, ...], [[1, 2]])\n\n # Test cases: newaxis\n 
self.assertAllEqual(\n sv[array_ops.newaxis, ...],\n array_ops.expand_dims_v2(array_ops.concat(v, axis=0), axis=0))\n\n # Test cases: boolean masks\n self.assertAllEqual(sv[ops.convert_to_tensor(sv) > 10],\n [11, 12, 13, 14, 15, 16])\n\n # Test cases: tensor input\n with self.assertRaisesRegex(TypeError, 'not allowed'):\n _ = sv[constant_op.constant(1)::]\n with self.assertRaisesRegex(TypeError, 'not allowed'):\n _ = sv[:constant_op.constant(1):]\n with self.assertRaisesRegex(TypeError, 'not allowed'):\n _ = sv[constant_op.constant(1)]\n\n # Test cases: inside tf.function\n @def_function.function\n def func():\n a = sv[:, 0]\n return a\n\n self.assertAllEqual(func(), [1, 3, 5, 7, 9, 11, 13, 15])\n\n def test_operator_overload(self):\n v1 = [\n variables_lib.Variable([1.]),\n variables_lib.Variable([2.]),\n ]\n sv1 = sharded_variable.ShardedVariable(v1)\n\n v2 = [\n variables_lib.Variable([1.]),\n variables_lib.Variable([2.]),\n ]\n sv2 = sharded_variable.ShardedVariable(v2)\n\n equal = sv1 == sv2\n self.assertAllEqual(equal, [True, True])\n self.assertAllEqual(sv1 + sv2, [2.0, 4.0])\n\n def test_shards_have_container_set(self):\n v1 = [\n variables_lib.Variable([1.]),\n variables_lib.Variable([2.]),\n ]\n sv1 = sharded_variable.ShardedVariable(v1)\n for v in sv1.variables:\n self.assertTrue(hasattr(v, '_sharded_container'))\n self.assertIs(v._sharded_container(), sv1)\n\n def test_numpy(self):\n v1 = [\n variables_lib.Variable([1.]),\n variables_lib.Variable([2.]),\n ]\n sv1 = sharded_variable.ShardedVariable(v1)\n sv1_np = sv1.numpy()\n self.assertIsInstance(sv1_np, np.ndarray)\n self.assertAllEqual(sv1_np, np.array([1., 2.]))\n\n\nclass ShardedVariableSaveLoadTest(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n cluster_def = get_cluster_def(test_cluster_params, num_workers=2, num_ps=3)\n self.cluster_resolver = SimpleClusterResolver(ClusterSpec(cluster_def))\n\n def tearDown(self):\n super().tearDown()\n # Reset context to disconnect from the cluster.\n context._reset_context()\n\n def _create_strategy(self, num_shards):\n if num_shards > 1:\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n self.cluster_resolver,\n variable_partitioner=sharded_variable.FixedShardsPartitioner(\n num_shards))\n else:\n strategy = ds_context._get_default_strategy()\n return strategy\n\n @combinations.generate(\n combinations.combine(\n shard_config=[[2, 2], [2, 3], [3, 2], [2, 1], [1, 1]],\n ))\n def testSaveAndLoadSingleVariable(self, shard_config):\n \"\"\"Test saving and loading ShardedVariable with different numbers of shards.\n\n Loading tf.Variables into multiple Shards is not yet supported\n\n Args:\n shard_config: The number of shards to use before and after loading. 
For\n example, [2, 1] means to create and save the variable with 2 shards and\n load it into 1 shard (i.e., a regular tf.Variable).\n \"\"\"\n strategy = self._create_strategy(shard_config[0])\n\n with strategy.scope():\n var = variables_lib.Variable([1., 2., 3., 4., 5., 6.])\n\n # Save variable\n model_dir = self.get_temp_dir()\n save.save(var, model_dir)\n\n strategy2 = self._create_strategy(shard_config[1])\n with strategy2.scope():\n # Load variable\n loaded = load.load(model_dir)\n\n # Assert all values loaded, values are same\n if shard_config[1] > 1:\n loaded = array_ops.concat(loaded.variables, axis=0)\n self.assertLen(loaded.numpy(), 6)\n\n if shard_config[0] > 1:\n var = array_ops.concat(var.variables, axis=0)\n self.assertAllClose(var.numpy(), loaded.numpy())\n\n def testSaveAndLoadModuleUnderStrategy(self):\n\n class Dense(module.Module):\n\n def __init__(self):\n self.kernel = variables_lib.Variable(\n random_ops.random_uniform((6, 6)), name='kernel')\n self.bias = variables_lib.Variable(\n random_ops.random_uniform((6,)), name='bias')\n\n @def_function.function\n def __call__(self, x):\n out = math_ops.matmul(self.kernel, x)\n out = out + self.bias\n return out\n\n x = constant_op.constant(\n math_ops.range(6, dtype=dtypes.float32), shape=[6, 1])\n\n strategy = self._create_strategy(2)\n with strategy.scope():\n layer = Dense()\n expect = layer(x)\n\n model_dir = self.get_temp_dir()\n save.save(layer, model_dir)\n\n strategy2 = self._create_strategy(3)\n with strategy2.scope():\n loaded_layer = load.load(model_dir)\n # Should fail with informative error\n with self.assertRaisesRegex(ValueError, 'run a loaded non-Keras'):\n got = loaded_layer(x)\n\n # Loading without a strategy should work, because the tf.function is traced\n # with a single variable as input\n loaded_layer = load.load(model_dir)\n got = loaded_layer(x)\n self.assertAllClose(got, expect)\n\n\nif __name__ == '__main__':\n v2_compat.enable_v2_behavior()\n test.main()\n" ]
[ [ "tensorflow.python.compat.v2_compat.enable_v2_behavior", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.distribute.distribution_strategy_context._get_default_strategy", "tensorflow.python.distribute.test_util.TestClusterParams", "tensorflow.python.ops.variables.Variable.SaveSliceInfo", "tensorflow.python.ops.embedding_ops.embedding_lookup_v2", "tensorflow.python.eager.context._reset_context", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.distribute.test_util.get_cluster_def", "tensorflow.python.distribute.sharded_variable.FixedShardsPartitioner", "tensorflow.python.trackable.autotrackable.AutoTrackable", "tensorflow.python.distribute.combinations.combine", "tensorflow.python.distribute.sharded_variable.MaxSizePartitioner", "tensorflow.python.ops.embedding_ops.embedding_lookup_sparse_v2", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.platform.test.main", "tensorflow.python.saved_model.loader.load", "tensorflow.python.checkpoint.checkpoint.Checkpoint", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.distribute.sharded_variable.MinSizePartitioner", "tensorflow.python.saved_model.save.save", "tensorflow.python.training.server_lib.ClusterSpec", "tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse_v2", "tensorflow.python.client.session.Session", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.array", "tensorflow.python.saved_model.load.load", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "tensorflow.python.distribute.sharded_variable.ShardedVariable", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ] ]
tonyw/antares
[ "6c2c505ab73d25b2f40831898a10bbf09602a4a0" ]
[ "platforms/c-rocm/schedule/standard/batch_matmul_v1.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport numpy as np\nfrom tvm import te\nimport logging\nimport sys, time, subprocess\n\n\nimport json\nimport os\n\n\ndef schedule(attrs):\n cfg, s, output = attrs.auto_config, attrs.scheduler, attrs.outputs[0]\n th_vals, rd_vals = [attrs.get_extent(x) for x in output.op.axis], [attrs.get_extent(x) for x in output.op.reduce_axis]\n\n C = output\n A, B = C.op.input_tensors\n\n AA = s.cache_read(A, \"shared\", [C])\n AL = s.cache_read(AA, \"local\", [C])\n BB = s.cache_read(B, \"shared\", [C])\n BL = s.cache_read(BB, \"local\", [C])\n if C.op in s.outputs:\n CC = s.cache_write(C, \"local\")\n else:\n s[C].set_scope('local')\n CC, C = C, s.outputs[0].output(0)\n\n axes = C.op.axis\n y, x = axes[-2], axes[-1]\n b = s[C].fuse(*axes[:-2])\n k = CC.op.reduce_axis[0]\n\n cfg.flop = float(np.product(th_vals) * rd_vals[0] * 2.0)\n\n cfg.define_split('tile_k', cfg.axis(k), num_outputs=3)\n ko, kt, ki = cfg['tile_k'].apply(s, CC, k)\n\n block_x = te.thread_axis('blockIdx.x')\n block_y = te.thread_axis('blockIdx.y')\n thread_x = te.thread_axis('threadIdx.x')\n thread_y = te.thread_axis('threadIdx.y')\n s[C].bind(b, te.thread_axis('blockIdx.z'))\n\n cfg.define_split('tile_y', cfg.axis(y), num_outputs=4)\n cfg.define_split('tile_x', cfg.axis(x), num_outputs=4)\n\n by, tyz, ty, yi = cfg['tile_y'].apply(s, C, y)\n bx, txz, tx, xi = cfg['tile_x'].apply(s, C, x)\n\n s[C].bind(by, block_y)\n s[C].bind(bx, block_x)\n s[C].bind(tyz, te.thread_axis('vthread'))\n s[C].bind(txz, te.thread_axis('vthread'))\n s[C].bind(ty, thread_y)\n s[C].bind(tx, thread_x)\n s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)\n\n s[CC].compute_at(s[C], tx)\n\n # b, yo, xo = CC.op.axis\n s[CC].reorder(ko, kt, *CC.op.axis, ki)\n s[CC].unroll(kt)\n\n for stage in [AL, BL]:\n s[stage].compute_at(s[CC], kt)\n # _, xi = s[stage].split(stage.op.axis[1], factor=4)\n # s[stage].vectorize(xi)\n s[stage].double_buffer()\n\n cfg.define_knob('vectorize', [False, True] if attrs.backend != 'c-hlsl' else [False])\n # cfg.define_knob('storage_align', [16, 48])\n for stage in [AA, BB]:\n # s[stage].storage_align(s[stage].op.axis[0],\n # cfg['storage_align'].val, 0)\n s[stage].compute_at(s[CC], ko)\n\n fused = s[stage].fuse(*s[stage].op.axis)\n ty, tx = s[stage].split(fused, nparts=cfg['tile_y'].size[2])\n tx, xi = s[stage].split(tx, nparts=cfg['tile_x'].size[2])\n _, xi = s[stage].split(xi, factor=4)\n\n s[stage].bind(ty, thread_y)\n s[stage].bind(tx, thread_x)\n if cfg['vectorize'].val:\n s[stage].vectorize(xi)\n s[stage].double_buffer()\n\n s[C].pragma(by, 'auto_unroll_max_step', 125)\n s[C].pragma(by, 'unroll_explicit', False)\n\n" ]
[ [ "numpy.product" ] ]
hogikyan/xarrayutils
[ "a0c583113665882e70fce8590a90a5b2fff176c3" ]
[ "xarrayutils/plotting.py" ]
[ "import numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport gsw\n\n\ndef center_lim(ax, which='y'):\n if which == 'y':\n lim = np.array(ax.get_ylim())\n ax.set_ylim(np.array([-1, 1]) * abs(lim).max())\n elif which == 'x':\n lim = np.array(ax.get_xlim())\n ax.set_xlim(np.array([-1, 1]) * abs(lim).max())\n elif which in ['xy', 'yx']:\n center_lim(ax, 'x')\n center_lim(ax, 'y')\n else:\n raise ValueError('`which` is not in (`x,`y`, `xy`) found %s' % which)\n\n\ndef depth_logscale(ax, yscale=400, ticks=None):\n if ticks is None:\n ticks = [0, 100, 250, 500, 1000, 2500, 5000]\n ax.set_yscale('symlog', linthreshy=yscale)\n ticklabels = [str(a) for a in ticks]\n ax.set_yticks(ticks)\n ax.set_yticklabels(ticklabels)\n ax.invert_yaxis()\n\n\ndef plot_line_shaded_std(x, y, std_y, horizontal=True,\n ax=None,\n line_kwargs=dict(),\n fill_kwargs=dict()):\n \"\"\"Plot wrapper to draw line for y and shaded patch according to std_y.\n The shading represents one std on each side of the line..\n\n Parameters\n ----------\n x : numpy.array or xr.DataArray\n Coordinate.\n y : numpy.array or xr.DataArray\n line data.\n std_y : numpy.array or xr.DataArray\n std corresponding to y.\n horizontal : bool\n Determines if the plot is horizontal or vertical (e.g. x is plotted\n on the y-axis).\n ax : matplotlib.axes\n Matplotlib axes object to plot on (the default is plt.gca()).\n line_kwargs : dict\n optional parameters for line plot.\n fill_kwargs : dict\n optional parameters for std fill plot.\n\n \"\"\"\n\n line_defaults = {}\n\n # Set plot defaults into the kwargs\n if not ax:\n ax = plt.gca()\n\n # Apply defaults but respect input\n line_defaults.update(line_kwargs)\n\n if horizontal:\n p = ax.plot(x, y, **line_defaults)\n else:\n p = ax.plot(y, x, **line_defaults)\n\n fill_defaults = {'color': p[-1].get_color(),\n 'alpha': 0.35}\n\n # Apply defaults but respect input\n fill_defaults.update(fill_kwargs)\n\n if horizontal:\n ax.fill_between(x, y-std_y, y+std_y, **fill_defaults)\n else:\n ax.fill_betweenx(x, y-std_y, y+std_y, **fill_defaults)\n\n\ndef box_plot(box, ax=None, split_detection='True', **kwargs):\n \"\"\"plots box despite coordinate discontinuities.\n INPUT\n -----\n box: np.array\n Defines the box in the coordinates of the current axis.\n Describing the box corners [x1, x2, y1, y2]\n ax: matplotlib.axis\n axis for plotting. 
Defaults to plt.gca()\n kwargs: optional\n anything that can be passed to plot can be put as kwarg\n \"\"\"\n\n if len(box) != 4:\n raise RuntimeError(\"'box' must be a 4 element np.array, \\\n describing the box corners [x1, x2, y1, y2]\")\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n x_split = False\n y_split = False\n\n if ax is None:\n ax = plt.gca()\n\n if split_detection:\n if np.diff([box[0], box[1]]) < 0:\n x_split = True\n\n if np.diff([box[2], box[3]]) < 0:\n y_split = True\n\n if y_split and not x_split:\n ax.plot([box[0], box[0], box[1], box[1], box[0]],\n [ylim[1], box[2], box[2], ylim[1], ylim[1]], **kwargs)\n\n ax.plot([box[0], box[0], box[1], box[1], box[0]],\n [ylim[0], box[3], box[3], ylim[0], ylim[0]], **kwargs)\n\n elif x_split and not y_split:\n ax.plot([xlim[1], box[0], box[0], xlim[1], xlim[1]],\n [box[2], box[2], box[3], box[3], box[2]], **kwargs)\n\n ax.plot([xlim[0], box[1], box[1], xlim[0], xlim[0]],\n [box[2], box[2], box[3], box[3], box[2]], **kwargs)\n\n elif x_split and y_split:\n ax.plot([xlim[1], box[0], box[0]], [box[2], box[2], ylim[1]],\n **kwargs)\n\n ax.plot([xlim[0], box[1], box[1]], [box[2], box[2], ylim[1]],\n **kwargs)\n\n ax.plot([xlim[1], box[0], box[0]], [box[3], box[3], ylim[0]],\n **kwargs)\n\n ax.plot([xlim[0], box[1], box[1]], [box[3], box[3], ylim[0]],\n **kwargs)\n\n elif not x_split and not y_split:\n ax.plot([box[0], box[0], box[1], box[1], box[0]],\n [box[2], box[3], box[3], box[2], box[2]], **kwargs)\n\ndef dict2box(di, xdim='lon', ydim='lat'):\n return np.array([di[xdim].start, di[xdim].stop,\n di[ydim].start, di[ydim].stop])\n\n\ndef box_plot_dict(di, xdim='lon', ydim='lat', **kwargs):\n \"\"\"plot box from xarray selection dict e.g.\n `{'xdim':slice(a, b), 'ydim':slice(c,d), ...}`\"\"\"\n\n # extract box from dict\n box = dict2box(di, xdim=xdim, ydim=ydim)\n # plot\n box_plot(box, **kwargs)\n\n\n\ndef draw_dens_contours_teos10(sigma='sigma0', add_labels=True, ax=None,\n density_grid=20, dens_interval=1.0,\n salt_on_x=True, slim=None, tlim=None,\n contour_kwargs={}, c_label_kwargs={}, **kwargs):\n \"\"\"draws density contours on the current plot.\n Assumes that the salinity and temperature values are given as SA and CT.\n Needs documentation... 
\"\"\"\n if ax is None:\n ax = plt.gca()\n\n if sigma not in ['sigma%i' % s for s in range(5)]:\n raise ValueError('Sigma function has to be one of `sigma0`...`sigma4` \\\n is: %s' % (sigma))\n\n # get salt (default: xaxis) and temp (default: yaxis) limits\n if salt_on_x:\n if not slim:\n slim = ax.get_xlim()\n if not tlim:\n tlim = ax.get_ylim()\n x = np.linspace(*slim, density_grid)\n y = np.linspace(*tlim, density_grid)\n else:\n if not tlim:\n tlim = ax.get_xlim()\n if not slim:\n slim = ax.get_ylim()\n x = np.linspace(*slim, density_grid)\n y = np.linspace(*tlim, density_grid)\n\n if salt_on_x:\n ss, tt = np.meshgrid(x, y)\n else:\n tt, ss = np.meshgrid(x, y)\n\n sigma_func = getattr(gsw, sigma)\n\n sig = sigma_func(ss, tt)\n\n levels = np.arange(np.floor(sig.min()), np.ceil(sig.max()), dens_interval)\n\n c_kwarg_defaults = dict(levels=levels, colors='0.4',\n linestyles='--', linewidths=0.5)\n c_kwarg_defaults.update(kwargs)\n c_kwarg_defaults.update(contour_kwargs)\n\n c_label_kwarg_defaults = dict(fmt='%.02f')\n c_label_kwarg_defaults.update(kwargs)\n c_label_kwarg_defaults.update(c_label_kwargs)\n\n ch = ax.contour(x, y, sig, **c_kwarg_defaults)\n ax.clabel(ch, **c_label_kwarg_defaults)\n\n if add_labels:\n plt.text(0.05, 0.05, '$\\sigma_{%s}$' % (sigma[-1]), fontsize=14,\n verticalalignment='center',\n horizontalalignment='center', transform=ax.transAxes,\n color=c_kwarg_defaults['colors'])\n\n\ndef tsdiagram(salt, temp, color=None, size=None,\n lon=None, lat=None, pressure=None,\n convert_teos10=True, ts_kwargs={},\n ax=None, fig=None, draw_density_contours=True,\n draw_cbar=True, add_labels=True,\n **kwargs):\n if ax is None:\n ax = plt.gca()\n\n if fig is None:\n fig = plt.gcf()\n\n if convert_teos10:\n temp_label = 'Conservative Temperature [$^{\\circ}C$]'\n salt_label = 'Absolute Salinity [$g/kg$]'\n if any([a is None for a in [lon, lat, pressure]]):\n raise ValueError('when converting to teos10 variables, \\\n input for lon, lat and pressure is needed')\n else:\n salt = gsw.SA_from_SP(salt, pressure, lon, lat)\n temp = gsw.CT_from_pt(salt, temp)\n else:\n temp_label = 'Potential Temperature [$^{\\circ}C$]'\n salt_label = 'Practical Salinity [$g/kg$]'\n\n if add_labels:\n ax.set_xlabel(salt_label)\n ax.set_ylabel(temp_label)\n\n scatter_kw_defaults = dict(s=size, c=color)\n scatter_kw_defaults.update(kwargs)\n s = ax.scatter(salt, temp, **scatter_kw_defaults)\n if draw_density_contours:\n draw_dens_contours_teos10(ax=ax, **ts_kwargs)\n if draw_cbar and color is not None:\n if isinstance(color, str) or isinstance(color, tuple):\n pass\n elif isinstance(color, list) or isinstance(color, np.ndarray) or \\\n isinstance(color, xr.DataArray):\n fig.colorbar(s, ax=ax)\n else:\n raise RuntimeError('`color` not recognized. %s' % type(color))\n return s\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.linspace", "numpy.meshgrid", "matplotlib.pyplot.gcf", "numpy.diff", "matplotlib.pyplot.text", "numpy.array" ] ]
rluver/ocr_attention_tensorflow
[ "448876d809bd6f80d7fae00ebdbbc9222046ddb4" ]
[ "inference.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 31 23:13:26 2021\n\n@author: MJH\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom model import OCR_Attention\nfrom auxiliary import decode_batch_predictions, encode_single_sample\nfrom tensorflow.keras.models import Model\n\n\n\n\nclass OCR:\n \n def __init__(self, config_path, model_path, max_length):\n with open(config_path, 'r', encoding = 'utf-8') as f:\n config = eval(f.read()) \n self.model = OCR_Attention(**config).build_model()\n \n # prediction model\n self.model = Model(\n self.model.get_layer(name = 'input_image').input,\n self.model.get_layer(name = 'classification_layer').output\n )\n self.model.load_weights(model_path)\n self.model.summary()\n \n self.max_length = max_length\n \n \n def predict(self, image_path):\n \n image = tf.data.Dataset.from_tensor_slices([image_path])\n image = (\n image.map(\n encode_single_sample, num_parallel_calls = tf.data.experimental.AUTOTUNE\n )\n .batch(1)\n .prefetch(buffer_size = tf.data.experimental.AUTOTUNE)\n )\n \n image = list(image.take(1))\n self.image = image\n \n preds = self.model.predict(image)\n pred_texts = decode_batch_predictions(preds, self.max_length)\n self.pred_texts = pred_texts\n \n return pred_texts\n \n \n def visualize(self):\n \n _, ax = plt.subplots(1, 1, figsize = (2, 2))\n try:\n image = (self.image[0]['input_image'][0, :, :, 0] * 255).numpy().astype(np.uint8)\n image = image.T\n title = f'Prediction: {self.pred_texts}'\n ax.imshow(image, cmap = 'gray')\n ax.set_title(title)\n ax.axis('off')\n plt.show()\n \n except Exception as e:\n print(e)\n\n\n\n\nif __name__ == '__main__':\n\n image_path = '' \n \n ocr = OCR('config.json', 'model/model', 25)\n ocr.predict(image_path)\n ocr.visualize()" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
MLO-lab/MuVI
[ "98796ab6deab225adc3add75e20c4b51f842f529" ]
[ "muvi/core/index.py" ]
[ "import numpy as np\n\n\ndef _normalize_index(indexer, index, as_idx=True):\n # work with ints, convert at the end\n # if single str, get idx and put to list\n # TODO: can be an issue if any of the indices is named 'all'..\n if isinstance(indexer, str):\n if indexer == \"all\":\n indexer = range(len(index))\n else:\n indexer = [index.get_loc(indexer)]\n # if single integer, put to list\n if isinstance(indexer, (np.integer, int)):\n indexer = [indexer]\n # work with np array\n indexer = np.array(indexer)\n # if empty\n if len(indexer) == 0:\n raise IndexError(f\"Empty index, `{indexer}`.\")\n # if mask, get indices where True\n if isinstance(indexer[0], (bool, np.bool_)):\n indexer = np.where(indexer)[0]\n # if all False from previous boolean mask\n if len(indexer) == 0:\n raise IndexError(f\"Empty index, `{indexer}`.\")\n # note empty, get first element\n # reason: dtype of str was not working for pd.Index\n # if str, get indices where names match\n if isinstance(indexer[0], (str, np.str_)):\n indexer = index.get_indexer(indexer)\n if isinstance(indexer[0], (int, np.integer)):\n if as_idx:\n return indexer\n return index[indexer]\n raise IndexError(f\"Invalid index, `{indexer}`.\")\n" ]
[ [ "numpy.array", "numpy.where" ] ]
czlwang/groundedSCAN
[ "3d03ac6de37dde8d22d487dc3cc5a53af188fa2e" ]
[ "GroundedScan/gym_minigrid/minigrid.py" ]
[ "import math\nimport gym\nfrom enum import IntEnum\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\n# Size in pixels of a cell in the full-scale human view\nCELL_PIXELS = 60\n\n# Map of color names to RGB values\nCOLORS = {\n 'red': np.array([128, 0, 0]),\n 'green': np.array([46, 139, 87]),\n 'blue': np.array([25, 25, 112]),\n 'purple': np.array([112, 39, 195]),\n 'yellow': np.array([255, 191, 0]),\n 'grey': np.array([100, 100, 100]),\n 'pink': np.array([255, 192, 203])\n}\n\nCOLOR_NAMES = sorted(list(COLORS.keys()))\n\n# Used to map colors to integers\nCOLOR_TO_IDX = {\n 'red': 0,\n 'green': 1,\n 'blue': 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey': 5,\n 'pink': 6\n}\n\nIDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))\n\n# Map of object type to integers\nOBJECT_TO_IDX = {\n 'unseen': 0,\n 'empty': 1,\n 'circle': 2,\n 'cylinder': 3,\n 'square': 4,\n 'agent': 5,\n}\n\nIDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))\n\n# Map of agent direction indices to vectors\nDIR_TO_VEC = [\n # Pointing right (positive X)\n np.array((1, 0)),\n # Down (positive Y)\n np.array((0, 1)),\n # Pointing left (negative X)\n np.array((-1, 0)),\n # Up (negative Y)\n np.array((0, -1)),\n]\n\n# TODO: change\nWEIGHT_TO_MOMENTUM = {\n \"light\": 1,\n \"heavy\": 2\n}\n\n\nclass WorldObj:\n \"\"\"\n Base class for grid world objects\n \"\"\"\n\n def __init__(self, type, color, size=1, vector_representation=None, object_representation=None, target=False,\n weight=\"light\"):\n assert type in OBJECT_TO_IDX, type\n assert color in COLOR_TO_IDX, color\n assert 1 <= size <= 4, \"Sizes outside of range [1,4] not supported.\"\n self.type = type\n self.color = color\n self.border_color = color\n self.contains = None\n self.size = size\n\n # Initial position of the object\n self.init_pos = None\n\n # Current position of the object\n self.cur_pos = None\n\n # Representations\n self.vector_representation = vector_representation\n self.object_representation = object_representation\n\n # Boolean whether an object is a target\n self.target = target\n\n # Determining whether a heavy object can be moved in the next step or not\n self.momentum = 0\n self.weight = weight\n self.momentum_threshold = WEIGHT_TO_MOMENTUM[self.weight]\n\n def can_overlap(self):\n \"\"\"Can the agent overlap with this?\"\"\"\n return True\n\n def can_pickup(self):\n \"\"\"Can the agent pick this up?\"\"\"\n return False\n\n def can_push(self):\n \"\"\"Can the agent push this?\"\"\"\n return False\n\n def render(self, r):\n \"\"\"Draw this object with the given renderer\"\"\"\n raise NotImplementedError\n\n def _set_color(self, r):\n \"\"\"Set the color of this object as the active drawing color\"\"\"\n c = COLORS[self.color]\n border_color = COLORS[self.border_color]\n r.setLineColor(border_color[0], border_color[1], border_color[2])\n r.setColor(c[0], c[1], c[2])\n\n\nclass Square(WorldObj):\n def __init__(self, color='grey', size=1, vector_representation=None, object_representation=None, target=False,\n weight=\"light\"):\n super().__init__('square', color, size, vector_representation=vector_representation,\n object_representation=object_representation, target=target, weight=weight)\n\n def render(self, r):\n self._set_color(r)\n\n # TODO: max_size is 4 here hardcoded\n r.drawPolygon([\n (0, CELL_PIXELS * (self.size / 4)),\n (CELL_PIXELS * (self.size / 4), CELL_PIXELS * (self.size / 4)),\n (CELL_PIXELS * (self.size / 4), 0),\n (0, 0)\n ])\n\n def can_pickup(self):\n return True\n\n def 
can_push(self):\n return True\n\n def push(self):\n self.momentum += 1\n if self.momentum >= self.momentum_threshold:\n self.momentum = 0\n return True\n else:\n return False\n\n\nclass Cylinder(WorldObj):\n def __init__(self, color='blue', size=1, vector_representation=None, object_representation=None, weight=\"light\"):\n super(Cylinder, self).__init__('cylinder', color, size, vector_representation,\n object_representation=object_representation, weight=weight)\n # TODO: generalize sizes\n\n def can_pickup(self):\n return True\n\n def render(self, r):\n self._set_color(r)\n\n # Vertical quad\n parallelogram_width = (CELL_PIXELS / 2) * (self.size / 4)\n parallelogram_height = CELL_PIXELS * (self.size / 4)\n r.drawPolygon([\n (CELL_PIXELS / 2, 0),\n (CELL_PIXELS / 2 + parallelogram_width, 0),\n (CELL_PIXELS / 2, parallelogram_height),\n (CELL_PIXELS / 2 - parallelogram_width, parallelogram_height)\n ])\n\n def can_push(self):\n return True\n\n def push(self):\n self.momentum += 1\n if self.momentum >= self.momentum_threshold:\n self.momentum = 0\n return True\n else:\n return False\n\n\nclass Circle(WorldObj):\n def __init__(self, color='blue', size=1, vector_representation=None, object_representation=None, target=False,\n weight=\"light\"):\n super(Circle, self).__init__('circle', color, size, vector_representation,\n object_representation=object_representation, target=target, weight=weight)\n\n def can_pickup(self):\n return True\n\n def can_push(self):\n return True\n\n def render(self, r):\n self._set_color(r)\n r.drawCircle(CELL_PIXELS * 0.5, CELL_PIXELS * 0.5, CELL_PIXELS // 10 * self.size)\n\n def push(self):\n self.momentum += 1\n if self.momentum >= self.momentum_threshold:\n self.momentum = 0\n return True\n else:\n return False\n\n\nclass Grid:\n \"\"\"\n Represent a grid and operations on it\n \"\"\"\n\n def __init__(self, width, height, depth):\n assert width >= 3\n assert height >= 3\n\n self.width = width\n self.height = height\n self._num_attributes_object = depth\n self.grid = [None] * width * height\n\n def __contains__(self, key):\n if isinstance(key, WorldObj):\n for e in self.grid:\n if e is key:\n return True\n elif isinstance(key, tuple):\n for e in self.grid:\n if e is None:\n continue\n if (e.color, e.type) == key:\n return True\n if key[0] is None and key[1] == e.type:\n return True\n return False\n\n def __eq__(self, other):\n grid1 = self.encode()\n grid2 = other.encode()\n return np.array_equal(grid2, grid1)\n\n def __ne__(self, other):\n return not self == other\n\n def copy(self):\n from copy import deepcopy\n return deepcopy(self)\n\n def set(self, i, j, v):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[j * self.width + i] = v\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n return self.grid[j * self.width + i]\n\n def rotate_left(self):\n \"\"\"\n Rotate the grid to the left (counter-clockwise)\n \"\"\"\n\n grid = Grid(self.height, self.width)\n\n for i in range(self.width):\n for j in range(self.height):\n v = self.get(i, j)\n grid.set(j, grid.height - 1 - i, v)\n\n return grid\n\n def slice(self, topX, topY, width, height):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n\n grid = Grid(width, height)\n\n for j in range(0, height):\n for i in range(0, width):\n x = topX + i\n y = topY + j\n\n if x >= 0 and x < self.width and \\\n y >= 0 and y < self.height:\n v = self.get(x, y)\n else:\n v = Square()\n\n grid.set(i, j, v)\n\n return grid\n\n def history2points(self, 
agent_history):\n\n points = []\n for p in agent_history:\n row = p[\"row\"]\n col = p[\"column\"]\n direction = p[\"direction\"]\n \n cy = CELL_PIXELS * (row + 0.5)\n cx = CELL_PIXELS * (col + 0.5)\n\n #import pdb; pdb.set_trace()\n directions = {\"north\": (cx, cy - 12),\n \"south\": (cx, cy + 12), \n \"east\": (cx + 12, cy), \n \"west\": (cx - 12, cy)}\n\n\n #r.setLineColor(255, 192, 203)\n points.append(directions[direction])\n return points\n\n def render(self, r, tile_size, attention_weights=[], agent_history=[]):\n \"\"\"\n Render this grid at a given scale\n :param r: target renderer object\n :param tile_size: tile size in pixels\n \"\"\"\n\n assert r.width == self.width * tile_size\n assert r.height == self.height * tile_size\n\n # Total grid size at native scale\n widthPx = self.width * CELL_PIXELS\n heightPx = self.height * CELL_PIXELS\n\n r.push()\n\n # Internally, we draw at the \"large\" full-grid resolution, but we\n # use the renderer to scale back to the desired size\n r.scale(tile_size / CELL_PIXELS, tile_size / CELL_PIXELS)\n\n if len(attention_weights) > 0:\n if len(attention_weights) == self.width * self.height:\n pixel_attention = False\n attention_weights = attention_weights.reshape(self.width, self.height)\n elif len(attention_weights) == self.width * CELL_PIXELS * self.height * CELL_PIXELS:\n pixel_attention = True\n attention_weights = attention_weights.reshape(self.width * CELL_PIXELS, self.height * CELL_PIXELS)\n start_range = 0\n end_range = 150\n else:\n pixel_attention = False\n # Draw the background of the in-world cells black\n if not pixel_attention:\n r.fillRect(\n 0,\n 0,\n widthPx,\n heightPx,\n 255, 255, 255\n )\n else:\n for j in range(0, heightPx):\n for i in range(0, widthPx):\n current_weight = attention_weights[j, i]\n color = int((end_range - start_range) * (1 - current_weight))\n r.push()\n r.fillRect(i, j, 1, 1, r=color, g=color, b=color)\n r.pop()\n\n # Draw the agent path\n\n points = self.history2points(agent_history)\n\n endpoints = zip(points, points[1:])\n #import pdb; pdb.set_trace()\n\n #for i, pos in enumerate(agent_history):\n # row = pos[\"row\"]\n # col = pos[\"column\"]\n # color = 150*(1 - (1/(1+(len(agent_history) - i))))\n # color = 255*(1 - math.exp(0.2*(i-len(agent_history))))\n # print(color)\n # r.fillRect(col * CELL_PIXELS, row * CELL_PIXELS, CELL_PIXELS, CELL_PIXELS, r=color, g=color, b=color)\n \n # Draw grid lines\n r.setLineColor(100, 100, 100)\n for rowIdx in range(0, self.height):\n y = CELL_PIXELS * rowIdx\n r.drawLine(0, y, widthPx, y)\n for colIdx in range(0, self.width):\n x = CELL_PIXELS * colIdx\n r.drawLine(x, 0, x, heightPx)\n\n # Render the grid\n for j in range(0, self.height):\n for i in range(0, self.width):\n cell = self.get(i, j)\n if len(attention_weights) > 0 and not pixel_attention:\n current_weight = attention_weights[j, i]\n color = int((end_range - start_range) * (1 - current_weight))\n r.push()\n r.fillRect(i * CELL_PIXELS, j * CELL_PIXELS, CELL_PIXELS, CELL_PIXELS, r=color, g=color, b=color)\n if cell == None:\n continue\n r.push()\n\n r.translate(i * CELL_PIXELS, j * CELL_PIXELS)\n cell.render(r)\n r.pop()\n\n for i, e in enumerate(endpoints):\n alpha = 255*(math.exp(0.1*(i-len(agent_history))))\n r.setLineColor(255, 192, 203, alpha)\n r.setLineWidth(2)\n r.drawLine(e[0][0], e[0][1], e[1][0], e[1][1])\n \n r.pop()\n\n def encode(self, agent_row: int, agent_column: int, agent_direction: int):\n \"\"\"\n Produce a compact numpy encoding of the grid.\n \"\"\"\n array = np.zeros((self.width, 
self.height, self._num_attributes_object + 1 + 4), dtype='uint8')\n for col in range(self.width):\n for row in range(self.height):\n grid_cell = self.get(col, row)\n empty_representation = np.zeros(self._num_attributes_object + 1 + 4)\n if grid_cell:\n empty_representation[:-5] = grid_cell.vector_representation\n\n # Set agent feature to 1 for the grid cell with the agent and add it's direction in one-hot form.\n if col == agent_column and row == agent_row:\n empty_representation[-5] = 1\n one_hot_direction = np.zeros(4)\n one_hot_direction[agent_direction] = 1\n empty_representation[-4:] = one_hot_direction\n array[row, col, :] = empty_representation\n return array\n\n\nclass MiniGridEnv(gym.Env):\n \"\"\"\n 2D grid world game environment.\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array', 'pixmap'],\n 'video.frames_per_second': 10\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n\n # Done completing task\n done = 6\n\n def __init__(self, grid_size=None, width=None, height=None, max_steps=100, seed=1337):\n # Can't set both grid_size and width/height\n if grid_size:\n assert width == None and height == None\n width = grid_size\n height = grid_size\n\n # Action enumeration for this environment\n self.actions = MiniGridEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Range of possible rewards\n self.reward_range = (0, 1)\n\n # Renderer object used to render the whole grid (full-scale)\n self.grid_render = None\n\n # Renderer used to render observations (small-scale agent view)\n self.obs_render = None\n\n # Environment configuration\n self.width = width\n self.height = height\n self.max_steps = max_steps\n\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n \n # Initialize the RNG\n self.seed(seed=seed)\n\n # Initialize the state\n self.reset()\n\n def reset(self):\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Generate a new random grid at the start of each episode\n # To keep the same grid for each episode, call env.seed() with\n # the same seed before calling env.reset()\n self._gen_grid(self.width, self.height)\n\n # These fields should be defined by _gen_grid\n assert self.agent_pos is not None\n assert self.agent_dir is not None\n\n # Check that the agent doesn't overlap with an object\n start_cell = self.grid.get(*self.agent_pos)\n assert start_cell is None or start_cell.can_overlap()\n\n # Item picked up, being carried, initially nothing\n self.carrying = None\n\n # Step count since episode start\n self.step_count = 0\n\n return\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = seeding.np_random(seed)\n return [seed]\n\n @property\n def steps_remaining(self):\n return self.max_steps - self.step_count\n\n def __str__(self):\n \"\"\"\n Produce a pretty string of the environment's grid along with the agent.\n A grid cell is represented by 2-character string, the first one for\n the object and the second one for the color.\n \"\"\"\n\n # Map of object types to short string\n OBJECT_TO_STR = {\n 'circle': 'A',\n 'square': 'B',\n 'cylinder': 'C',\n }\n\n # Map agent's direction to short string\n AGENT_DIR_TO_STR = {\n 0: '>',\n 1: 'V',\n 2: '<',\n 3: '^'\n }\n\n str = ''\n for j in 
range(self.grid.height):\n for i in range(self.grid.width):\n if i == self.agent_pos[0] and j == self.agent_pos[1]:\n str += 2 * AGENT_DIR_TO_STR[self.agent_dir]\n continue\n c = self.grid.get(i, j)\n if not c:\n str += ' '\n continue\n str += OBJECT_TO_STR[c.type] + c.color[0].upper()\n if j < self.grid.height - 1:\n str += '\\n'\n return str\n\n def _gen_grid(self, width, height):\n assert False, \"_gen_grid needs to be implemented by each environment\"\n\n def _rand_int(self, low, high):\n \"\"\"\n Generate random integer in [low,high[\n \"\"\"\n\n return self.np_random.randint(low, high)\n\n def place_obj(self, obj, top=None, size=None, reject_fn=None, max_tries=math.inf):\n \"\"\"\n Place an object at an empty position in the grid\n\n :param obj:\n :param top: top-left position of the rectangle where to place\n :param size: size of the rectangle where to place\n :param reject_fn: function to filter out potential positions\n :param max_tries:\n \"\"\"\n\n if top is None:\n top = (0, 0)\n else:\n top = (max(top[0], 0), max(top[1], 0))\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),\n self._rand_int(top[1], min(top[1] + size[1], self.grid.height))\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Check if there is a filtering criterion\n if reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos\n\n def place_agent(\n self,\n top=None,\n size=None,\n rand_dir=True,\n max_tries=math.inf\n ):\n \"\"\"\n Set the agent's starting point at an empty position in the grid\n \"\"\"\n\n self.agent_pos = None\n pos = self.place_obj(None, top, size, max_tries=max_tries)\n self.agent_pos = pos\n\n if rand_dir:\n self.agent_dir = self._rand_int(0, 4)\n\n return pos\n\n @property\n def dir_vec(self):\n \"\"\"\n Get the direction vector for the agent, pointing in the direction\n of forward movement.\n \"\"\"\n\n assert self.agent_dir >= 0 and self.agent_dir < 4\n return DIR_TO_VEC[self.agent_dir]\n\n @property\n def right_vec(self):\n \"\"\"\n Get the vector pointing to the right of the agent.\n \"\"\"\n\n dx, dy = self.dir_vec\n return np.array((-dy, dx))\n\n @property\n def front_pos(self):\n \"\"\"\n Get the position of the cell that is right in front of the agent\n \"\"\"\n\n return self.agent_pos + self.dir_vec\n\n def step(self, action):\n self.step_count += 1\n\n reward = 0\n done = False\n\n current_cell = self.grid.get(*self.agent_pos)\n\n # Rotate left\n if action == self.actions.left:\n self.agent_dir -= 1\n if self.agent_dir < 0:\n self.agent_dir += 4\n\n # Rotate right\n elif action == self.actions.right:\n self.agent_dir = (self.agent_dir + 1) % 4\n\n # Move forward\n elif action == self.actions.forward:\n # Get the position in front of the agent\n fwd_pos = self.front_pos\n\n # Get the contents of the cell in front of the agent\n fwd_cell = self.grid.get(*fwd_pos)\n if fwd_cell == None or fwd_cell.can_overlap():\n self.agent_pos = fwd_pos\n if fwd_cell != None and fwd_cell.type == 'goal':\n done = True\n reward = self._reward()\n if fwd_cell != None and fwd_cell.type == 'lava':\n 
done = True\n\n # Pick up an object\n elif action == self.actions.pickup:\n if current_cell.can_pickup():\n if self.carrying is None:\n self.carrying = current_cell\n self.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*self.agent_pos, None)\n\n # Drop an object\n elif action == self.actions.drop:\n if not current_cell and self.carrying:\n self.grid.set(*self.agent_pos, self.carrying)\n self.carrying.cur_pos = self.agent_pos\n self.carrying = None\n\n # Done action (not used by default)\n elif action == self.actions.done:\n pass\n\n else:\n assert False, \"unknown action\"\n\n if self.step_count >= self.max_steps:\n done = True\n\n return reward, done, {}\n\n def render(self, mode='', close=False, highlight=True, tile_size=CELL_PIXELS, attention_weights=[], agent_history=[]):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.grid_render:\n self.grid_render.close()\n return\n\n\n if self.grid_render is None or self.grid_render.window is None or (self.grid_render.width != self.width * tile_size):\n from GroundedScan.gym_minigrid.rendering import Renderer\n self.grid_render = Renderer(\n self.width * tile_size,\n self.height * tile_size,\n True if mode == 'human' else False\n )\n\n r = self.grid_render\n #import pdb; pdb.set_trace()\n\n if r.window and len(agent_history) > 0:\n last_pos = agent_history[-1]\n action = last_pos[\"action\"]\n style = \"font-size: 20pt;\" \n if action==\"stay\":\n style += \" color: rgb(255,0,0)\"\n r.window.setStyleSheet(style)\n r.window.setText(action)\n\n r.beginFrame()\n\n # Render the whole grid\n if len(attention_weights) > 0:\n flat_attention_weights = attention_weights[0]\n else:\n flat_attention_weights = attention_weights\n self.grid.render(r, tile_size, attention_weights=flat_attention_weights, agent_history=agent_history)\n\n # Draw the agent\n ratio = tile_size / CELL_PIXELS\n r.push()\n r.scale(ratio, ratio)\n r.translate(\n CELL_PIXELS * (self.agent_pos[0] + 0.5),\n CELL_PIXELS * (self.agent_pos[1] + 0.5)\n )\n r.rotate(self.agent_dir * 90)\n r.setLineColor(255, 192, 203)\n r.setColor(255, 192, 203)\n r.drawPolygon([\n (-12, 10),\n (12, 0),\n (-12, -10)\n ])\n r.pop()\n r.endFrame()\n\n if mode == 'rgb_array':\n return r.getArray()\n elif mode == 'pixmap':\n return r.getPixmap()\n return r\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.array_equal" ] ]
blakeNaccarato/boilerdaq
[ "bc39074237c42d94c5402327495f05ff662e7a35" ]
[ "src/boilerdaq.py" ]
[ "\"\"\"Data acquisition and control of a boiler.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nfrom collections import OrderedDict, deque\nfrom csv import DictReader, DictWriter\nfrom datetime import datetime, timedelta\nfrom os.path import splitext\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Deque, List, NamedTuple, Optional, Tuple\n\nimport pyqtgraph\nfrom mcculw.ul import ULError, t_in, v_in\nfrom numpy import exp, random\nfrom pyvisa import VisaIOError\nfrom simple_pid import PID\n\npyqtgraph.setConfigOptions(antialias=True)\nDELAY = 2 # read/write/plot timestep\nHISTORY_LENGTH = 300 # points to keep for plotting and fitting\n\nif os.environ.get(\"BOILERDAQ_DEBUG\") == \"True\":\n DEBUG = True\nelse:\n DEBUG = False\n\nif DEBUG:\n DELAY_DEBUG = 0.2\n GAIN_DEBUG = 100\n TAU_DEBUG = DELAY * HISTORY_LENGTH\n NOISE_SCALE = 1e-2\n\n\nclass Sensor(NamedTuple):\n \"\"\"\n Sensor parameters.\n\n Parameters\n ----------\n name: str\n Name of the sensor.\n board: int\n Which board the sensor belongs to.\n channel: int\n The channel pointing to this sensor on the board.\n reading: int\n The sensor type, either \"Temperature\" or \"Voltage\".\n unit: str\n The unit type for values reported by the board.\n \"\"\"\n\n name: str\n board: int\n channel: int\n reading: str\n unit: str\n\n @classmethod\n def get(cls, path: str) -> List[Sensor]:\n \"\"\"Process a CSV file at ``path``, returning a ``List`` of ``Sensor``.\"\"\"\n\n sensors = []\n with open(path) as csv_file:\n reader = DictReader(csv_file)\n for row in reader:\n sensors.append(\n cls(\n row[\"name\"],\n int(row[\"board\"]),\n int(row[\"channel\"]),\n row[\"reading\"],\n row[\"unit\"],\n )\n )\n return sensors\n\n\nclass ScaledParam(NamedTuple):\n \"\"\"\n Parameters for scalar modification of a sensor.\n\n Parameters\n ----------\n name: str\n Name of the scaled value.\n unscaled_sensor: str\n Name of the sensor to be scaled.\n scale: float\n The scale to apply.\n offset: float\n The offset to apply.\n unit: str\n The unit type after scaling.\n \"\"\"\n\n name: str\n unscaled_sensor: str\n scale: float\n offset: float\n unit: str\n\n @classmethod\n def get(cls, path: str) -> List[ScaledParam]:\n \"\"\"Process a CSV file at ``path``, returning a ``List`` of ``ScaledParam``.\"\"\"\n\n params = []\n with open(path) as csv_file:\n reader = DictReader(csv_file)\n for row in reader:\n params.append(\n cls(\n row[\"name\"],\n row[\"unscaled_sensor\"],\n float(row[\"scale\"]),\n float(row[\"offset\"]),\n str(row[\"unit\"]),\n )\n )\n return params\n\n\nclass FluxParam(NamedTuple):\n \"\"\"\n Parameters for the flux between two sensors.\n\n Parameters\n ----------\n name: str\n The name of the flux.\n origin_sensor: str\n The name of the sensor at the origin.\n distant_sensor: str\n The name of the sensor not at the origin.\n conductivity: float\n The conductivity of the path between the sensors.\n length: float\n The length of the path between the sensors.\n unit: str\n The unit type of the flux.\n \"\"\"\n\n name: str\n origin_sensor: str\n distant_sensor: str\n conductivity: float\n length: float\n unit: str\n\n @classmethod\n def get(cls, path: str) -> List[FluxParam]:\n \"\"\"Process a CSV file at ``path``, returning a ``List`` of ``FluxParam``.\"\"\"\n\n params = []\n with open(path) as csv_file:\n reader = DictReader(csv_file)\n for row in reader:\n params.append(\n cls(\n row[\"name\"],\n row[\"origin_sensor\"],\n row[\"distant_sensor\"],\n float(row[\"conductivity\"]),\n float(row[\"length\"]),\n 
row[\"unit\"],\n )\n )\n return params\n\n\nclass ExtrapParam(NamedTuple):\n \"\"\"\n Parameters for extrapolation from two sensors and a flux to a point of interest.\n\n Parameters\n ----------\n name: str\n The name of the extrapolation.\n origin_sensor: str\n The name of the sensor at the origin.\n distant_sensor: str\n The name of the sensor not at the origin.\n conductivity: float\n The conductivity of the path from ``distant_sensor`` to the point of interest.\n length: float\n The length of the path from ``distant_sensor`` to the point of interest.\n unit: str\n The unit type of the extrapolation.\n \"\"\"\n\n name: str\n origin_sensor: str\n flux: str\n conductivity: float\n length: float\n unit: str\n\n @classmethod\n def get(cls, path: str) -> List[ExtrapParam]:\n \"\"\"Process a CSV file at ``path``, returning a ``List`` of ``ExtrapParam``.\"\"\"\n\n params = []\n with open(path) as csv_file:\n reader = DictReader(csv_file)\n for row in reader:\n params.append(\n cls(\n row[\"name\"],\n row[\"origin_sensor\"],\n row[\"flux\"],\n float(row[\"conductivity\"]),\n float(row[\"length\"]),\n row[\"unit\"],\n )\n )\n return params\n\n\nclass PowerParam(NamedTuple):\n \"\"\"\n Parameters for power supplies.\n\n Parameters\n ----------\n name: str\n The name of the power supply parameter.\n unit:\n The unit of the power supply parameter.\n \"\"\"\n\n name: str\n unit: str\n\n @classmethod\n def get(cls, path: str) -> List[PowerParam]:\n \"\"\"Process a CSV file at ``path``, returning a ``List`` of ``ExtrapParam``.\"\"\"\n\n power_supplies = []\n with open(path) as csv_file:\n reader = DictReader(csv_file)\n for row in reader:\n power_supplies.append(\n cls(\n row[\"name\"],\n row[\"unit\"],\n )\n )\n return power_supplies\n\n\nclass Result:\n \"\"\"\n A result.\n\n Attributes\n ----------\n source: str\n The source of the result.\n value: float\n The value of the result.\n time: float\n The time that the result was taken, with the oldest result at zero.\n history: Deque[float]\n Previous values resulting from the source.\n \"\"\"\n\n def __init__(self):\n self.source = None\n self.value = None\n self.time = deque([], maxlen=HISTORY_LENGTH)\n self.history = deque([], maxlen=HISTORY_LENGTH)\n for _ in range(HISTORY_LENGTH):\n self.time.append(0)\n self.history.append(0)\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n self.history.append(self.value)\n self.time.append(self.time[-1] + DELAY)\n\n @staticmethod\n def get(name: str, results: List[Result]) -> Result:\n \"\"\"Get a result or results by the source name.\"\"\"\n\n result_names = [result.source.name for result in results]\n i = result_names.index(name)\n result = results[i]\n return result\n\n\nclass Reading(Result):\n \"\"\"\n A reading directly from a sensor.\n\n Parameters\n ----------\n sensor: Sensor\n The sensor parameters used to get a result.\n\n Attributes\n ----------\n unit_types: Dict[str: int]\n Enumeration of unit types supported by the board on which the sensor resides.\n debug_offset: float\n A random offset to use when debugging.\n \"\"\"\n\n unit_types = {\"C\": 0, \"F\": 1, \"K\": 2, \"V\": 5}\n\n def __init__(self, sensor: Sensor):\n super().__init__()\n if DEBUG:\n self.debug_offset = random.normal(scale=GAIN_DEBUG)\n self.source = sensor\n self.update()\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n if DEBUG:\n self.value = (\n self.debug_offset\n + GAIN_DEBUG * (1 - exp(-self.time[-1] / TAU_DEBUG))\n + random.normal(scale=NOISE_SCALE * GAIN_DEBUG)\n )\n elif self.source.reading == 
\"temperature\":\n try:\n unit_int = self.unit_types[self.source.unit]\n self.value = t_in(self.source.board, self.source.channel, unit_int)\n except ULError:\n self.value = 0\n elif self.source.reading == \"voltage\":\n self.value = v_in(self.source.board, self.source.channel, 0)\n super().update()\n\n\nclass ScaledResult(Result):\n \"\"\"\n A scaled result.\n\n Parameters\n ----------\n scaled_param: ScaledParam\n The parameters for obtaining a scaled result.\n results: List[Result]\n A list of results containing the source to be scaled.\n\n Attributes\n ----------\n unscaled_result: Result\n The unscaled result.\n \"\"\"\n\n def __init__(\n self,\n scaled_param: ScaledParam,\n results: List[Result],\n ):\n super().__init__()\n self.source = scaled_param\n self.unscaled_result = Result.get(scaled_param.unscaled_sensor, results)\n self.update()\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n self.value = self.unscaled_result.value * self.source.scale + self.source.offset\n super().update()\n\n\nclass Flux(Result):\n \"\"\"\n A flux result.\n\n Parameters\n ----------\n flux_param: FluxParam\n The parameters for obtaining a flux result.\n results: List[Result]\n A list of results containing the source to be scaled.\n\n Attributes\n ----------\n origin_result: Result\n The result of the source at the origin.\n distant_result: Result\n The result of the source not at the origin.\n \"\"\"\n\n def __init__(\n self,\n flux_param: FluxParam,\n results: List[Result],\n ):\n super().__init__()\n self.source = flux_param\n self.origin_result = Result.get(flux_param.origin_sensor, results)\n self.distant_result = Result.get(flux_param.distant_sensor, results)\n self.update()\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n self.value = (\n self.source.conductivity\n / self.source.length\n * (self.origin_result.value - self.distant_result.value)\n )\n super().update()\n\n\nclass ExtrapResult(Result):\n \"\"\"\n An extrapolated result.\n\n Parameters\n ----------\n extrap_param: ExtrapParam\n The parameters for obtaining an extrapolated result.\n results: List[Result]\n A list of results containing the source to be scaled.\n\n Attributes\n ----------\n origin_result: Result\n The result of the source at the origin.\n distant_result: Result\n The result of the source not at the origin.\n \"\"\"\n\n def __init__(\n self,\n extrap_param: ExtrapParam,\n results: List[Result],\n ):\n super().__init__()\n self.source = extrap_param\n self.origin_result = Result.get(extrap_param.origin_sensor, results)\n self.flux_result = Result.get(extrap_param.flux, results)\n\n self.update()\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n self.value = self.origin_result.value - (\n self.flux_result.value * self.source.length / self.source.conductivity\n )\n super().update()\n\n\nclass PowerResult(Result):\n \"\"\"\n A result from a power supply.\n\n Parameters\n ----------\n power_param: PowerParam\n The parameters for obtaining a result from the power supply.\n instrument\n The VISA instrument from which to obtain the result.\n current_limit: float\n The current limit to be set.\n \"\"\"\n\n def __init__(\n self,\n power_param: PowerParam,\n instrument,\n current_limit: float,\n ):\n self.source = power_param\n self.instrument = instrument\n if self.source.name == \"V\":\n self.instrument.write(\"output:state on\")\n self.instrument.write(\"source:current \" + str(current_limit))\n self.update()\n\n def update(self):\n \"\"\"Update the result.\"\"\"\n\n try:\n if self.source.name == 
\"V\":\n self.value = float(self.instrument.query(\"measure:voltage?\"))\n elif self.source.name == \"I\":\n self.value = float(self.instrument.query(\"measure:current?\"))\n except VisaIOError as exc:\n print(exc)\n\n def write(self, value):\n \"\"\"Write a value back to the instrument.\"\"\"\n\n try:\n if self.source.name == \"V\":\n self.instrument.write(\"source:voltage \" + str(value))\n elif self.source.name == \"I\":\n self.instrument.write(\"source:current \" + str(value))\n except VisaIOError as exc:\n print(exc)\n\n\nclass ResultGroup(OrderedDict):\n \"\"\"\n A group of results.\n\n Parameters\n ----------\n group_dict\n Dictionary of result groupings.\n results: List[Result]\n List of results containing the results to be grouped.\n \"\"\"\n\n def __init__(self, group_dict: OrderedDict, results: List[Result]):\n for key, val in group_dict.items():\n result_names = val.split()\n filtered_results = []\n for name in result_names:\n result = Result.get(name, results)\n filtered_results.append(result)\n self[key] = filtered_results\n\n\nclass Controller:\n \"\"\"\n A PID controller.\n\n Parameters\n ----------\n control_result: PowerResult\n The result to control based on feedback.\n feedback_result: Result\n The result to get feedback from.\n setpoint: float\n The value that the feedback should be coerced to through PID control.\n gains: List[float]\n List of the proportional, integral, and derivative gains of the PID controller.\n output_limits: Tuple[float, float]\n Limits of the PID controller.\n start_delay: float\n Time to wait before activating PID.\n\n Attributes\n ----------\n pid: PID\n The PID controller.\n start_time\n The time that the controller was created.\n \"\"\"\n\n def __init__(\n self,\n control_result: PowerResult,\n feedback_result: Result,\n setpoint: float,\n gains: List[float],\n output_limits: Tuple[float, float],\n start_delay: float = 0,\n ):\n self.control_result = control_result\n self.feedback_result = feedback_result\n self.pid = PID(*gains, setpoint, output_limits=output_limits)\n self.start_time = datetime.now()\n self.start_delay = timedelta(seconds=start_delay)\n self.feedback_value = self.feedback_result.value\n self.last_feedback_value = self.feedback_value\n self.count_of_suspicious_readings = 0\n\n def update(self):\n \"\"\"Update the PID controller.\"\"\"\n\n time_elapsed = datetime.now() - self.start_time\n if time_elapsed > self.start_delay:\n self.last_feedback_value = self.feedback_value\n self.feedback_value = self.feedback_result.value\n feedback_value_change = abs(self.feedback_value - self.last_feedback_value)\n if feedback_value_change > 10 or self.feedback_value < 0:\n self.control_result.write(0)\n raise Exception(\n \"The PID feedback sensor value seems incorrect. Aborting.\"\n )\n control_value = self.pid(self.feedback_value)\n print(f\"{self.feedback_value} {control_value}\")\n self.control_result.write(control_value)\n\n\nclass Writer:\n \"\"\"A CSV file writer.\n\n Parameters\n ----------\n path: str\n Base name of the first results CSV to be written (e.g. `results.csv`). 
The ISO\n time of creation of the file will be appended to the provided path (e.g.\n `results_yyyy_mm_ddThh-mm-ss.csv`).\n results: List[Result]\n The first list of results to be written to a file.\n\n Attributes\n ----------\n paths: List[str]\n Base names of multiple results CSVs to be written to.\n result_groups: List[List[Result]]\n Groups of results to be written to each of the names in `paths`.\n fieldname_groups: List[str]\n Groups of fieldnames to be written to each of the names in `paths`.\n time: datetime\n The time that the last value was taken.\n \"\"\"\n\n def __init__(\n self,\n path: str,\n results: List[Result],\n ):\n self.paths: List[str] = []\n self.result_groups: List[List[Result]] = []\n self.fieldname_groups: List[List[str]] = []\n self.time = datetime.now()\n self.add(path, results)\n\n def add(self, path: str, results: List[Result]):\n \"\"\"Add a CSV file to be written to and a set of results to write to it.\n\n Parameters\n ----------\n path: str\n Base name of additional results CSVs to be written (e.g. `results.csv`). The\n ISO time of creation of the file will be appended to the provided path (e.g.\n `results_yyyy_mm_ddThh-mm-ss.csv`).\n results: List[Result]\n Additonal list of results to be written to a file.\n \"\"\"\n\n (path, ext) = splitext(path)\n\n # The \":\" in ISO time strings is not supported by filenames\n file_time = self.time.isoformat(timespec=\"seconds\").replace(\":\", \"-\")\n\n path = path + \"_\" + file_time + ext\n\n # Compose the fieldnames and first row of values\n sources = [\n result.source.name + \" (\" + result.source.unit + \")\" for result in results\n ]\n fieldnames = [\"time\"] + sources\n values = [self.time.isoformat()] + [result.value for result in results]\n to_write = dict(zip(fieldnames, values))\n\n # Create the CSV, writing the header and the first row of values\n with open(path, \"w\", newline=\"\") as csv_file:\n csv_writer = DictWriter(csv_file, fieldnames=fieldnames)\n csv_writer.writeheader()\n csv_writer.writerow(to_write)\n\n # Record the file and results for writing additional rows later.\n self.paths.append(path)\n self.result_groups.append(results)\n self.fieldname_groups.append(fieldnames)\n\n def update(self):\n \"\"\"Update results and write the new data to CSV.\"\"\"\n\n if DEBUG:\n sleep(DELAY_DEBUG)\n else:\n sleep(DELAY)\n self.time = datetime.now().isoformat()\n for results in self.result_groups:\n for result in results:\n result.update()\n self.write()\n\n def write(self):\n \"\"\"Write data to CSV.\"\"\"\n\n for path, results, fieldnames in zip(\n self.paths, self.result_groups, self.fieldname_groups\n ):\n values = [self.time] + [result.value for result in results]\n to_write = dict(zip(fieldnames, values))\n\n with open(path, \"a\", newline=\"\") as csv_file:\n csv_writer = DictWriter(csv_file, fieldnames=fieldnames)\n csv_writer.writerow(to_write)\n\n\nclass Plotter:\n \"\"\"A plotter for data.\n\n Parameters\n ----------\n title: str\n The title of the first plot.\n results: List[Result]\n The results to plot.\n row: int = 0\n The window row to place the first plot.\n col: int = 0\n The window column to place the first plot.\n\n Attributes\n ----------\n all_results: List[Result]\n all_curves: List[pyqtgraph.PlotCurveItem]\n all_histories: List[Deque]\n time: List[int]\n \"\"\"\n\n window = pyqtgraph.GraphicsWindow()\n\n def __init__(\n self,\n title: str,\n results: List[Result],\n row: int = 0,\n col: int = 0,\n ):\n self.all_results: List[Result] = []\n self.all_curves: 
List[pyqtgraph.PlotCurveItem] = []\n self.all_histories: List[Deque] = []\n self.time: List[int] = []\n for i in range(0, HISTORY_LENGTH):\n self.time.append(-i * DELAY)\n self.time.reverse()\n self.add(title, results, row, col)\n\n def add(self, title: str, results: List[Result], row: int, col: int):\n \"\"\"Plot results to a new pane in the plot window.\n\n Parameters\n ----------\n title: str\n The title of an additional plot.\n results: List[Result]\n The results to plot.\n row: int = 0\n The window row to place an additional plot.\n col: int = 0\n The window column to place an additional plot.\n \"\"\"\n i = 0\n plot = self.window.addPlot(row, col)\n plot.addLegend()\n plot.setLabel(\"left\", units=results[0].source.unit)\n plot.setLabel(\"bottom\", units=\"s\")\n plot.setTitle(title)\n self.all_results.extend(results)\n histories = [result.history for result in results]\n self.all_histories.extend(histories)\n names = [result.source.name for result in results]\n for history, name in zip(histories, names):\n curve = plot.plot(self.time, history, pen=pyqtgraph.intColor(i), name=name)\n self.all_curves.append(curve)\n i += 1\n\n def update(self):\n \"\"\"Update plots.\"\"\"\n for curve, history in zip(self.all_curves, self.all_histories):\n curve.setData(self.time, history)\n\n\nclass Looper:\n \"\"\"Handles threads for plotting, writing, and control.\n\n Parameters\n ----------\n writer: Writer\n The writer.\n plotter: Plotter\n The plotter.\n controller: Optional[Controller]\n The controller.\n\n Attributes\n ----------\n plot_window_open: bool\n Whether the plot window is currently open.\n \"\"\"\n\n def __init__(\n self, writer: Writer, plotter: Plotter, controller: Optional[Controller] = None\n ):\n self.writer = writer\n self.plotter = plotter\n if controller is None:\n self.controller = None\n else:\n self.controller = controller\n self.plot_window_open = False\n\n def write_loop(self):\n \"\"\"The CSV writer function to be looped in the write/control thread.\"\"\"\n\n while self.plot_window_open:\n self.writer.update()\n\n def plot_loop(self):\n \"\"\"The function to be looped in the plot thread.\"\"\"\n\n self.plotter.update()\n\n def write_control_loop(self):\n \"\"\"The control function to be looped in the write/control thread.\"\"\"\n\n while self.plot_window_open:\n self.writer.update()\n self.controller.update()\n\n def start(self):\n \"\"\"Start the write/control thread and plot on the main thread.\"\"\"\n\n self.plot_window_open = True\n if self.controller is None:\n write_thread = Thread(target=self.write_loop)\n write_thread.start()\n else:\n write_thread = Thread(target=self.write_control_loop)\n write_thread.start()\n\n plot_timer = pyqtgraph.QtCore.QTimer()\n plot_timer.timeout.connect(self.plot_loop)\n plot_timer.start()\n pyqtgraph.Qt.QtGui.QApplication.instance().exec_()\n self.plot_window_open = False\n" ]
[ [ "numpy.random.normal", "numpy.exp" ] ]
KiroSummer/AMR
[ "49f4edc9e738ba3409d2d5e45e5e1881d8b338cc" ]
[ "parser/decoder.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom parser.data import NIL, PAD\nfrom parser.utils import compute_f_by_tensor\nfrom parser.transformer import MultiheadAttention, Transformer, TiedTransformer\n\nfrom parser.utils import label_smoothed_nll_loss\n\n\nclass ArcGenerator(nn.Module):\n def __init__(self, vocabs, embed_dim, ff_embed_dim, num_heads, dropout):\n super(ArcGenerator, self).__init__()\n self.vocabs = vocabs\n self.arc_layer = MultiheadAttention(embed_dim, num_heads, dropout, weights_dropout=False)\n self.arc_layer_norm = nn.LayerNorm(embed_dim)\n self.fc1 = nn.Linear(embed_dim, ff_embed_dim)\n self.fc2 = nn.Linear(ff_embed_dim, embed_dim)\n self.ff_layer_norm = nn.LayerNorm(embed_dim)\n self.dropout = dropout\n\n def forward(self, outs, graph_state, graph_padding_mask, attn_mask, target_rel=None, work=False):\n x, arc_weight = self.arc_layer(outs, graph_state, graph_state,\n key_padding_mask=graph_padding_mask,\n attn_mask=attn_mask,\n need_weights='max')\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.arc_layer_norm(outs + x)\n residual = x\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n outs = self.ff_layer_norm(residual + x)\n\n if work:\n arc_ll = torch.log(arc_weight + 1e-12)\n return arc_ll, outs\n target_arc = torch.ne(target_rel, self.vocabs['rel'].token2idx(NIL)) # 0 or 1\n arc_mask = torch.eq(target_rel, self.vocabs['rel'].token2idx(PAD))\n pred = torch.ge(arc_weight, 0.5)\n if not self.training:\n print('arc p %.3f r %.3f f %.3f' % compute_f_by_tensor(pred, target_arc, arc_mask))\n arc_loss = F.binary_cross_entropy(arc_weight, target_arc.float(), reduction='none')\n arc_loss = arc_loss.masked_fill_(arc_mask, 0.).sum((0, 2))\n return arc_loss, outs\n\n\nclass ConceptGenerator(nn.Module):\n def __init__(self, vocabs, embed_dim, ff_embed_dim, conc_size, dropout):\n super(ConceptGenerator, self).__init__()\n self.alignment_layer = MultiheadAttention(embed_dim, 1, dropout, weights_dropout=False)\n self.alignment_layer_norm = nn.LayerNorm(embed_dim)\n self.fc1 = nn.Linear(embed_dim, ff_embed_dim)\n self.fc2 = nn.Linear(ff_embed_dim, embed_dim)\n self.ff_layer_norm = nn.LayerNorm(embed_dim)\n self.transfer = nn.Linear(embed_dim, conc_size)\n self.generator = nn.Linear(conc_size, vocabs['predictable_concept'].size)\n self.diverter = nn.Linear(conc_size, 3)\n self.vocabs = vocabs\n self.dropout = dropout\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.transfer.weight, std=0.02)\n nn.init.normal_(self.diverter.weight, std=0.02)\n nn.init.normal_(self.generator.weight, std=0.02)\n nn.init.constant_(self.diverter.bias, 0.)\n nn.init.constant_(self.transfer.bias, 0.)\n nn.init.constant_(self.generator.bias, 0.)\n\n def forward(self, outs, snt_state, snt_padding_mask, copy_seq,\n target=None, work=False):\n x, alignment_weight = self.alignment_layer(outs, snt_state, snt_state,\n key_padding_mask=snt_padding_mask,\n need_weights='one')\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.alignment_layer_norm(outs + x)\n residual = x\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n outs = self.ff_layer_norm(residual + x)\n\n seq_len, bsz, _ = outs.size()\n outs_concept = torch.tanh(self.transfer(outs))\n outs_concept = 
F.dropout(outs_concept, p=self.dropout, training=self.training)\n\n gen_gate, map_gate, copy_gate = F.softmax(self.diverter(outs_concept), -1).chunk(3, dim=-1)\n copy_gate = torch.cat([copy_gate, map_gate], -1)\n\n probs = gen_gate * F.softmax(self.generator(outs_concept), -1)\n\n tot_ext = 1 + copy_seq.max().item()\n vocab_size = probs.size(-1)\n\n if tot_ext - vocab_size > 0:\n ext_probs = probs.new_zeros((1, 1, tot_ext - vocab_size)).expand(seq_len, bsz, -1)\n probs = torch.cat([probs, ext_probs], -1)\n # copy_seq: src_len x bsz x 2\n # copy_gate: tgt_len x bsz x 2\n # alignment_weight: tgt_len x bsz x src_len\n # index: tgt_len x bsz x (src_len x 2)\n index = copy_seq.transpose(0, 1).contiguous().view(1, bsz, -1).expand(seq_len, -1, -1)\n copy_probs = (copy_gate.unsqueeze(2) * alignment_weight.unsqueeze(-1)).view(seq_len, bsz, -1)\n probs = torch.scatter_add(probs, -1, index, copy_probs) # modify add_ to add\n ll = torch.log(probs + 1e-12)\n\n if work:\n return ll, outs\n\n if not self.training:\n _, pred = torch.max(ll, -1)\n total_concepts = torch.ne(target, self.vocabs['predictable_concept'].padding_idx)\n acc = torch.eq(pred, target).masked_select(total_concepts).float().sum().item()\n tot = total_concepts.sum().item()\n print('conc acc', acc / tot)\n\n concept_loss = -ll.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)\n concept_mask = torch.eq(target, self.vocabs['predictable_concept'].padding_idx)\n concept_loss = concept_loss.masked_fill_(concept_mask, 0.).sum(0)\n return concept_loss, outs\n\n\nclass RelationGenerator(nn.Module):\n\n def __init__(self, vocabs, embed_dim, rel_size, dropout):\n super(RelationGenerator, self).__init__()\n self.vocabs = vocabs\n self.transfer_head = nn.Linear(embed_dim, rel_size)\n self.transfer_dep = nn.Linear(embed_dim, rel_size)\n\n self.proj = nn.Linear(rel_size + 1, vocabs['rel'].size * (rel_size + 1))\n self.dropout = dropout\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.transfer_head.weight, std=0.02)\n nn.init.normal_(self.transfer_dep.weight, std=0.02)\n nn.init.normal_(self.proj.weight, std=0.02)\n\n nn.init.constant_(self.proj.bias, 0.)\n nn.init.constant_(self.transfer_head.bias, 0.)\n nn.init.constant_(self.transfer_dep.bias, 0.)\n\n def forward(self, outs, graph_state, target_rel=None, work=False):\n\n def get_scores(dep, head):\n head = torch.tanh(self.transfer_head(head))\n dep = torch.tanh(self.transfer_dep(dep))\n\n head = F.dropout(head, p=self.dropout, training=self.training)\n dep = F.dropout(dep, p=self.dropout, training=self.training)\n\n dep_num, bsz, _ = dep.size()\n head_num = head.size(0)\n\n bias_dep = dep.new_ones((dep_num, bsz, 1))\n bias_head = head.new_ones((head_num, bsz, 1))\n\n # seq_len x bsz x dim\n dep = torch.cat([dep, bias_dep], 2)\n head = torch.cat([head, bias_head], 2)\n\n # bsz x dep_num x vocab_size x dim\n dep = self.proj(dep).view(dep_num, bsz, self.vocabs['rel'].size, -1).transpose(0, 1).contiguous()\n # bsz x dim x head_num\n head = head.permute(1, 2, 0)\n\n # bsz x dep_num x vocab_size x head_num\n scores = torch.bmm(dep.view(bsz, dep_num * self.vocabs['rel'].size, -1), head).view(bsz, dep_num,\n self.vocabs['rel'].size,\n head_num)\n return scores\n\n scores = get_scores(outs, graph_state).permute(1, 0, 3, 2).contiguous()\n\n dep_num, bsz, _ = outs.size()\n head_num = graph_state.size(0)\n log_probs = F.log_softmax(scores, dim=-1)\n _, rel = torch.max(log_probs, -1)\n if work:\n # dep_num x bsz x head x vocab\n return log_probs\n\n rel_mask = 
torch.eq(target_rel, self.vocabs['rel'].token2idx(NIL)) + torch.eq(target_rel,\n self.vocabs['rel'].token2idx(PAD))\n rel_acc = (torch.eq(rel, target_rel).float().masked_fill_(rel_mask, 0.)).sum().item()\n rel_tot = rel_mask.numel() - rel_mask.float().sum().item()\n if not self.training:\n print('rel acc %.3f' % (rel_acc / rel_tot))\n rel_loss = label_smoothed_nll_loss(log_probs.view(-1, self.vocabs['rel'].size), target_rel.view(-1), 0.).view(\n dep_num, bsz, head_num)\n rel_loss = rel_loss.masked_fill_(rel_mask, 0.).sum((0, 2))\n return rel_loss\n\n\nclass DecodeLayer(nn.Module):\n def __init__(self, vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, conc_size, rel_size, dropout):\n super(DecodeLayer, self).__init__()\n self.inference_layers = inference_layers\n self.arc_generator = ArcGenerator(vocabs, embed_dim, ff_embed_dim, num_heads, dropout)\n self.concept_generator = ConceptGenerator(vocabs, embed_dim, ff_embed_dim, conc_size, dropout)\n self.relation_generator = RelationGenerator(vocabs, embed_dim, rel_size, dropout) # biaffine @kiro\n self.dropout = dropout\n self.vocabs = vocabs\n\n def forward(self, probe, snt_state, graph_state,\n snt_padding_mask, graph_padding_mask, attn_mask,\n copy_seq, target=None, target_rel=None,\n work=False):\n # probe: tgt_len x bsz x embed_dim\n # snt_state, graph_state: seq_len x bsz x embed_dim\n\n outs = F.dropout(probe, p=self.dropout, training=self.training)\n\n if work:\n for i in range(self.inference_layers):\n arc_ll, outs = self.arc_generator(outs, graph_state, graph_padding_mask, attn_mask, work=True)\n concept_ll, outs = self.concept_generator(outs, snt_state, snt_padding_mask, copy_seq, work=True)\n # all_ll [1, hypotheses_size, steps] @kiro steps == nodes num\n rel_ll = self.relation_generator(outs, graph_state, work=True)\n return concept_ll, arc_ll, rel_ll\n\n arc_losses, concept_losses, rel_losses = [], [], []\n for i in range(self.inference_layers):\n arc_loss, outs = self.arc_generator(outs, graph_state, graph_padding_mask, attn_mask,\n target_rel=target_rel,\n work=False)\n concept_loss, outs = self.concept_generator(outs, snt_state, snt_padding_mask, copy_seq,\n target=target,\n work=False)\n arc_losses.append(arc_loss)\n concept_losses.append(concept_loss)\n # del arc_loss, concept_loss # delete the intermediate variable\n rel_loss = self.relation_generator(outs, graph_state, target_rel=target_rel, work=False)\n arc_loss = arc_losses[-1] # torch.stack(arc_losses).mean(0)\n concept_loss = concept_losses[-1] # torch.stack(concept_losses).mean(0)\n return concept_loss, arc_loss, rel_loss\n" ]
[ [ "torch.ge", "torch.max", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.cat", "torch.eq", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.init.normal_", "torch.log", "torch.scatter_add", "torch.ne" ] ]
sugarchain-dev/electrum-sugar
[ "5c70bfa883950bb501625916a888618792cd635b" ]
[ "electrum/plot.py" ]
[ "import datetime\nfrom collections import defaultdict\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n\nfrom .i18n import _\nfrom .bitcoin import COIN\n\n\nclass NothingToPlotException(Exception):\n def __str__(self):\n return _(\"Nothing to plot.\")\n\n\ndef plot_history(history):\n if len(history) == 0:\n raise NothingToPlotException()\n hist_in = defaultdict(int)\n hist_out = defaultdict(int)\n for item in history:\n if not item['confirmations']:\n continue\n if item['timestamp'] is None:\n continue\n value = item['value'].value/COIN\n date = item['date']\n datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))\n if value > 0:\n hist_in[datenum] += value\n else:\n hist_out[datenum] -= value\n\n f, axarr = plt.subplots(2, sharex=True)\n plt.subplots_adjust(bottom=0.2)\n plt.xticks( rotation=25 )\n ax = plt.gca()\n plt.ylabel('SUGAR')\n plt.xlabel('Month')\n xfmt = md.DateFormatter('%Y-%m-%d')\n ax.xaxis.set_major_formatter(xfmt)\n axarr[0].set_title('Monthly Volume')\n xfmt = md.DateFormatter('%Y-%m')\n ax.xaxis.set_major_formatter(xfmt)\n width = 20\n\n r1 = None\n r2 = None\n dates_values = list(zip(*sorted(hist_in.items())))\n if dates_values and len(dates_values) == 2:\n dates, values = dates_values\n r1 = axarr[0].bar(dates, values, width, label='incoming')\n axarr[0].legend(loc='upper left')\n dates_values = list(zip(*sorted(hist_out.items())))\n if dates_values and len(dates_values) == 2:\n dates, values = dates_values\n r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')\n axarr[1].legend(loc='upper left')\n if r1 is None and r2 is None:\n raise NothingToPlotException()\n return plt\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.dates.DateFormatter", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
tchayintr/thwcc-attn
[ "4ee38365eb338e82bf455f46ff5fe7c59fb7a975" ]
[ "src/models/rnn_with_chunk_sw_tagger.py" ]
[ "from allennlp.modules.conditional_random_field import ConditionalRandomField\nfrom allennlp.nn.util import get_mask_from_sequence_lengths\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_sequence\n\nimport sys\n\nimport constants\nimport models.util\nfrom models.util import ModelUsage\nfrom models.common import BiaffineCombination, MLP\nfrom models.tagger import RNNTagger\n\n\nclass RNNTaggerWithChunkSubword(RNNTagger):\n def __init__(self,\n n_vocab,\n unigram_embed_size,\n n_bigrams,\n bigram_embed_size,\n n_chunks,\n chunk_embed_size,\n n_subwords,\n subword_embed_size,\n rnn_unit_type,\n rnn_bidirection,\n rnn_batch_first,\n rnn_n_layers1,\n rnn_hidden_size1,\n rnn_n_layers2,\n rnn_hidden_size2,\n rnn_n_layers3,\n rnn_hidden_size3,\n mlp_n_layers,\n mlp_hidden_size,\n n_labels,\n use_crf=True,\n feat_size=0,\n rnn_dropout=0.0,\n embed_dropout=0.0,\n biaffine_dropout=0.0,\n mlp_dropout=0.0,\n chunk_vector_dropout=0,\n subword_vector_dropout=0,\n pretrained_unigram_embed_size=0,\n pretrained_bigram_embed_size=0,\n pretrained_chunk_embed_size=0,\n pretrained_subword_embed_size=0,\n pretrained_embed_usage=ModelUsage.NONE,\n chunk_pooling_type=constants.AVG,\n subword_pooling_type=constants.AVG,\n min_chunk_len=1,\n max_chunk_len=0,\n min_subword_len=1,\n max_subword_len=0,\n chunk_loss_ratio=0,\n subword_loss_ratio=0,\n biaffine_type='',\n reverse=False,\n file=sys.stderr):\n nn.Module.__init__(self)\n\n self.n_vocab = n_vocab\n self.unigram_embed_size = unigram_embed_size\n self.n_bigrams = n_bigrams\n self.bigram_embed_size = bigram_embed_size\n self.n_chunks = n_chunks\n self.chunk_embed_size = chunk_embed_size\n self.n_subwords = n_subwords\n self.subword_embed_size = subword_embed_size\n\n self.rnn_unit_type = rnn_unit_type\n self.rnn_bidirection = rnn_bidirection\n self.rnn_batch_first = rnn_batch_first\n self.rnn_n_layers1 = rnn_n_layers1\n self.rnn_hidden_size1 = rnn_hidden_size1\n self.rnn_n_layers2 = rnn_n_layers2\n self.rnn_hidden_size2 = rnn_hidden_size2\n self.rnn_n_layers3 = rnn_n_layers3\n self.rnn_hidden_size3 = rnn_hidden_size3\n\n self.mlp_n_layers = mlp_n_layers\n self.mlp_hidden_size = mlp_hidden_size\n self.n_labels = n_labels\n self.use_crf = use_crf\n self.feat_size = feat_size\n\n self.rnn_dropout = rnn_dropout\n self.embed_dropout = embed_dropout\n self.biaffine_dropout = biaffine_dropout\n self.mlp_dropout = mlp_dropout\n self.chunk_vector_dropout = chunk_vector_dropout\n self.subword_vector_dropout = subword_vector_dropout\n\n self.pretrained_unigram_embed_size = pretrained_unigram_embed_size\n self.pretrained_bigram_embed_size = pretrained_bigram_embed_size\n self.pretrained_chunk_embed_size = pretrained_chunk_embed_size\n self.pretrained_subword_embed_size = pretrained_subword_embed_size\n self.pretrained_embed_usage = pretrained_embed_usage\n\n self.chunk_pooling_type = chunk_pooling_type\n self.subword_pooling_type = subword_pooling_type\n self.min_chunk_len = min_chunk_len\n self.max_chunk_len = max_chunk_len\n self.min_subword_len = min_subword_len\n self.max_subword_len = max_subword_len\n self.chunk_loss_ratio = chunk_loss_ratio\n self.subword_loss_ratio = subword_loss_ratio\n\n self.biaffine_type = biaffine_type\n self.reverse = reverse\n\n self.use_chunk_attention = (chunk_pooling_type == constants.WAVG\n or chunk_pooling_type == constants.WCON)\n self.use_chunk_concat = (chunk_pooling_type == constants.CON\n or chunk_pooling_type == constants.WCON)\n 
self.use_subword_attention = (subword_pooling_type == constants.SWAVG\n or subword_pooling_type\n == constants.SWCON)\n self.use_subword_concat = (subword_pooling_type == constants.CON\n or subword_pooling_type == constants.SWCON)\n self.use_chunk_average = not self.use_chunk_concat\n self.use_subword_average = not self.use_subword_concat\n self.use_rnn2 = rnn_n_layers2 > 0 and rnn_hidden_size2 > 0\n self.use_rnn3 = rnn_n_layers3 > 0 and rnn_hidden_size3 > 0\n\n self.chunk_embed_size_merged = (\n chunk_embed_size +\n (pretrained_chunk_embed_size\n if pretrained_embed_usage == ModelUsage.CONCAT else 0))\n self.subword_embed_size_merged = (\n subword_embed_size +\n (pretrained_subword_embed_size\n if pretrained_embed_usage == ModelUsage.CONCAT else 0))\n\n if self.use_chunk_concat:\n self.chunk_concat_num = sum(\n [i for i in range(min_chunk_len, max_chunk_len + 1)])\n self.chunk_embed_out_size = self.chunk_embed_size_merged * self.chunk_concat_num\n else:\n self.chunk_embed_out_size = self.chunk_embed_size_merged\n\n if self.use_subword_concat:\n self.subword_concat_num = sum(\n [i for i in range(min_subword_len, max_subword_len + 1)])\n self.subword_embed_out_size = self.subword_embed_size_merged * self.subword_concat_num\n else:\n self.subword_embed_out_size = self.subword_embed_size_merged\n\n self.unigram_embed = None\n self.bigram_embed = None\n self.chunk_embed = None\n self.subword_embed = None\n self.pretrained_unigram_embed = None\n self.pretrained_bigram_embed = None\n self.pretrained_subword_embed = None\n self.pretrained_chunk_embed = None\n self.rnn = None\n self.biaffine = None\n self.rnn2 = None\n self.rnn3 = None\n self.mlp = None\n self.crf = None\n self.cross_entropy_loss = None\n\n print('### Parameters', file=sys.stderr)\n print('# Chunk pooling type: {}'.format(self.chunk_pooling_type),\n file=sys.stderr)\n print('# Chunk loss ratio: {}'.format(self.chunk_loss_ratio),\n file=sys.stderr)\n print('# Subword pooling type: {}'.format(self.subword_pooling_type),\n file=sys.stderr)\n print('# Subword loss ratio: {}'.format(self.subword_loss_ratio),\n file=sys.stderr)\n\n # embeddings layer(s)\n\n print('# Embedding dropout ratio={}'.format(self.embed_dropout),\n file=sys.stderr)\n self.unigram_embed, self.pretrained_unigram_embed = models.util.construct_embeddings(\n n_vocab, unigram_embed_size, pretrained_unigram_embed_size,\n pretrained_embed_usage)\n if self.pretrained_embed_usage != ModelUsage.NONE:\n print('# Pretrained embedding usage: {}'.format(\n self.pretrained_embed_usage),\n file=sys.stderr)\n print('# Unigram embedding matrix: W={}'.format(\n self.unigram_embed.weight.shape),\n file=sys.stderr)\n embed_size = self.unigram_embed.weight.shape[1]\n if self.pretrained_unigram_embed is not None:\n if self.pretrained_embed_usage == ModelUsage.CONCAT:\n embed_size += self.pretrained_unigram_embed_size\n print('# Pretrained unigram embedding matrix: W={}'.format(\n self.pretrained_unigram_embed.weight.shape),\n file=sys.stderr)\n\n if n_bigrams > 0 and bigram_embed_size > 0:\n self.bigram_embed, self.pretrained_bigram_embed = models.util.construct_embeddings(\n n_bigrams, bigram_embed_size, pretrained_bigram_embed_size,\n pretrained_embed_usage)\n if self.pretrained_embed_usage != ModelUsage.NONE:\n print('# Pretrained embedding usage: {}'.format(\n self.pretrained_embed_usage),\n file=sys.stderr)\n print('# Bigram embedding matrix: W={}'.format(\n self.bigram_embed.weight.shape),\n file=sys.stderr)\n embed_size += self.bigram_embed.weight.shape[1]\n if 
self.pretrained_bigram_embed is not None:\n if self.pretrained_embed_usage == ModelUsage.CONCAT:\n embed_size += self.pretrained_bigram_embed.weight.shape[1]\n print('# Pretrained bigram embedding matrix: W={}'.format(\n self.pretrained_bigram_embed.weight.shape),\n file=sys.stderr)\n\n self.additional_feat_size = feat_size\n if feat_size > 0:\n embed_size += feat_size\n print(\n '# Additional features size (dimension): {}'.format(feat_size),\n file=sys.stderr)\n\n self.chunk_embed, self.pretrained_chunk_embed = models.util.construct_embeddings(\n n_chunks, chunk_embed_size, pretrained_chunk_embed_size,\n pretrained_embed_usage)\n print('# Chunk embedding matrix: W={}'.format(\n self.chunk_embed.weight.shape),\n file=sys.stderr)\n if self.pretrained_chunk_embed is not None:\n print('# Pretrained chunk embedding matrix: W={}'.format(\n self.pretrained_chunk_embed.weight.shape),\n file=sys.stderr)\n\n self.subword_embed, self.pretrained_subword_embed = models.util.construct_embeddings(\n n_subwords, subword_embed_size, pretrained_subword_embed_size,\n pretrained_embed_usage)\n print('# Subword embedding matrix: W={}'.format(\n self.subword_embed.weight.shape),\n file=sys.stderr)\n if self.pretrained_subword_embed is not None:\n print('# Pretrained subword embedding matrix: W={}'.format(\n self.pretrained_subword_embed.weight.shape),\n file=sys.stderr)\n\n # recurrent layers 1\n\n self.rnn_unit_type = rnn_unit_type\n self.rnn = models.util.construct_RNN(unit_type=rnn_unit_type,\n embed_size=embed_size,\n hidden_size=rnn_hidden_size1,\n n_layers=rnn_n_layers1,\n batch_first=rnn_batch_first,\n dropout=rnn_dropout,\n bidirectional=rnn_bidirection)\n rnn_output_size1 = rnn_hidden_size1 * (2 if rnn_bidirection else 1)\n\n # biaffine b/w token and chunk\n\n if self.use_chunk_attention:\n use_U = 'u' in biaffine_type or 'U' in biaffine_type\n use_V = 'v' in biaffine_type or 'V' in biaffine_type\n use_b = 'b' in biaffine_type or 'B' in biaffine_type\n\n biaffine_left_size = rnn_output_size1\n self.biaffine = BiaffineCombination(\n biaffine_left_size,\n self.chunk_embed_size_merged,\n use_U=use_U,\n use_V=use_V,\n use_b=use_b,\n dropout=biaffine_dropout,\n )\n\n print(\n '# Biaffine layer for attention: W={}, U={}, V={}, b={}, dropout={}'\n .format(\n self.biaffine.W.weight.shape, self.biaffine.U.weight.shape\n if self.biaffine.U is not None else None,\n self.biaffine.V.weight.shape\n if self.biaffine.V is not None else None,\n self.biaffine.b if self.biaffine.b is not None else None,\n self.biaffine_dropout),\n file=sys.stderr)\n\n # chunk vector dropout\n\n print('# Chunk vector dropout={}'.format(self.chunk_vector_dropout),\n file=sys.stderr)\n\n # subword vector dropout\n\n print('# Subword vector dropout={}'.format(\n self.subword_vector_dropout),\n file=sys.stderr)\n\n # recurrent layers 2\n\n embed_out_size = self.chunk_embed_out_size if not self.reverse else self.subword_embed_out_size\n embed_out_size2 = self.subword_embed_out_size if not self.reverse else self.chunk_embed_out_size\n\n if self.use_rnn2:\n rnn_input_size2 = rnn_output_size1 + embed_out_size\n\n self.rnn2 = models.util.construct_RNN(\n unit_type=rnn_unit_type,\n embed_size=rnn_input_size2,\n hidden_size=rnn_hidden_size2,\n n_layers=rnn_n_layers2,\n batch_first=rnn_batch_first,\n dropout=rnn_dropout,\n bidirectional=rnn_bidirection)\n rnn_output_size2 = rnn_hidden_size2 * (2 if rnn_bidirection else 1)\n mlp_input_size = rnn_output_size2\n else:\n mlp_input_size = rnn_output_size1 + embed_out_size\n\n # recurrent layers 3\n\n if 
self.use_rnn3:\n if self.use_rnn2:\n rnn_input_size3 = rnn_output_size2 + embed_out_size2\n else:\n rnn_input_size3 = rnn_output_size1 + embed_out_size2\n\n self.rnn3 = models.util.construct_RNN(\n unit_type=rnn_unit_type,\n embed_size=rnn_input_size3,\n hidden_size=rnn_hidden_size3,\n n_layers=rnn_n_layers3,\n batch_first=rnn_batch_first,\n dropout=rnn_dropout,\n bidirectional=rnn_bidirection)\n rnn_output_size3 = rnn_hidden_size3 * (2 if rnn_bidirection else 1)\n mlp_input_size = rnn_output_size3\n else:\n if self.use_rnn2:\n mlp_input_size = rnn_output_size2 + embed_out_size2\n else:\n mlp_input_size = rnn_output_size1 + embed_out_size2\n\n # MLP\n\n print('# MLP', file=sys.stderr)\n self.mlp = MLP(input_size=mlp_input_size,\n hidden_size=mlp_hidden_size,\n n_layers=mlp_n_layers,\n output_size=n_labels,\n dropout=mlp_dropout,\n activation=nn.Identity)\n\n # Inference layer (CRF/softmax)\n\n if self.use_crf:\n self.crf = ConditionalRandomField(n_labels)\n print('# CRF cost: {}'.format(self.crf.transitions.shape),\n file=sys.stderr)\n else:\n self.softmax_cross_entropy = nn.CrossEntropyLoss()\n\n \"\"\"\n us: mini-batch of token (char) sequences\n cs: mini-batch of chunk (word) sequences (can be reversed to sws)\n ds: mini-batch of chunk (word) sequences for concat models (can be reversed to swds)\n sws: mini-batch of subword sequences (can be reversed to cs)\n swds: mini-batch of subword sequences for concat models (can be reversed to ds)\n ms: mini-batch of chunk masking matrix (tuples) (can be reversed to qs)\n qs: mini-batch of subword masking matrix (tuples) (can be reversed to ms)\n bs: mini-batch of bigram sequences\n fs: mini-batch of additional features\n gls: mini-batch of segmentation label sequences\n gcs: mini-batch of attention chunk label sequences (can be reversed to gcws)\n gsws: mini-batch of attention subword label sequences (can be reversed to gcs)\n \"\"\"\n\n def forward(self,\n us,\n cs,\n ds,\n sws,\n swds,\n ms,\n qs,\n bs=None,\n fs=None,\n gls=None,\n gcs=None,\n gsws=None,\n calculate_loss=True):\n lengths = super().extract_lengths(us)\n us, cs, sws, bs, fs, gls, gcs, gsws = self.pad_features(\n us, cs, sws, bs, fs, gls, gcs, gsws)\n\n closs = None\n swloss = None\n pcs = None\n psws = None\n\n xs = self.extract_token_features(us, bs, None,\n fs) # token unigram etc. 
-[Embed]-> x\n rs = self.rnn_output(xs, lengths) # x -[RNN]-> r\n\n if not self.reverse:\n if cs is not None:\n ws = self.extract_chunk_features(\n cs) # chunk -[Embed]-> w (chunk sequence)\n else:\n ws = [None] * len(us)\n\n if ds is not None:\n vs = self.extract_chunk_features(\n ds) # chunk -[Embed]-> w (concatenated chunk matrix)\n else:\n vs = [None] * len(us)\n\n if sws is not None:\n cws = self.extract_subword_features(\n sws) # sw_chunk -[Embed]-> sw (subword sequence)\n else:\n cws = [None] * len(us)\n\n if swds is not None:\n cvs = self.extract_subword_features(\n swds\n ) # sw_chunk -[Embed]-> sw (concatenated subword matrix)\n else:\n cvs = [None] * len(us)\n else:\n if cs is not None:\n ws = self.extract_subword_features(cs)\n else:\n ws = [None] * len(us)\n\n if ds is not None:\n vs = self.extract_subword_features(ds)\n else:\n vs = [None] * len(us)\n\n if sws is not None:\n cws = self.extract_chunk_features(sws)\n else:\n cws = [None] * len(us)\n\n if swds is not None:\n cvs = self.extract_chunk_features(swds)\n else:\n cvs = [None] * len(us)\n\n if not self.reverse:\n ### wsw\n # r @ r$w -> h\n closs, pcs, hs = self.act_and_merge_chunk_features(\n rs, ws, vs, ms, gcs, lengths, get_att_score=False)\n if self.use_rnn2:\n hs = self.rnn2_output(hs, lengths) # h -[RNN]-> h'\n\n # h' @ h'$sw -> h'\n swloss, psws, hs = self.act_and_merge_subword_features(\n hs, cws, cvs, qs, gsws, lengths, get_att_score=False)\n if self.use_rnn3:\n hs = self.rnn3_output(hs, lengths) # h' -[RNN]-> h''\n else:\n closs, pcs, hs = self.act_and_merge_subword_features(\n rs, ws, vs, ms, gcs, lengths, get_att_score=False)\n if self.use_rnn2:\n hs = self.rnn2_output(hs, lengths) # h -[RNN]-> h'\n\n # h' @ h'$sw -> h'\n swloss, psws, hs = self.act_and_merge_chunk_features(\n hs, cws, cvs, qs, gsws, lengths, get_att_score=False)\n if self.use_rnn3:\n hs = self.rnn3_output(hs, lengths) # h' -[RNN]-> h''\n\n ys = self.mlp(hs)\n sloss, pls = self.predict(ys,\n ls=gls,\n lengths=lengths,\n calculate_loss=calculate_loss)\n\n if swloss is not None:\n loss = (1 -\n self.sw_loss_ratio) * sloss + self.sw_loss_ratio * swloss\n else:\n loss = sloss\n\n return loss, pls, pcs, psws\n\n def pad_features(self, us, cs, sws, bs, fs, gls, gcs, gsws):\n batch_first = self.rnn_batch_first\n us = pad_sequence(us, batch_first=batch_first)\n cs = pad_sequence(cs, batch_first=batch_first) if cs else None\n sws = pad_sequence(sws, batch_first=batch_first) if sws else None\n bs = pad_sequence(bs, batch_first=batch_first) if bs else None\n fs = pad_sequence(fs, batch_first=batch_first) if fs else None\n gls = pad_sequence(gls, batch_first=batch_first) if gls else None\n gcs = pad_sequence(gcs, batch_first=batch_first) if gcs else None\n gsws = pad_sequence(gsws, batch_first=batch_first) if gsws else None\n\n return us, cs, sws, bs, fs, gls, gcs, gsws\n\n def trim_features_by_length(self, x, v, gc, length):\n x = x[:length, :] if x is not None else None\n v = v[:, :length, :] if v is not None else None\n gc = gc[:length] if gc is not None else None\n return x, v, gc\n\n def decode(self, us, cs, ds, sws, swds, ms, qs, bs=None, fs=None):\n with torch.no_grad():\n _, ps, _, _ = self.forward(us,\n cs,\n ds,\n sws,\n swds,\n ms,\n qs,\n bs,\n fs,\n calculate_loss=False)\n return ps\n\n def rnn2_output(self, xs, lengths=None):\n if self.rnn_unit_type == 'lstm':\n hs, (hy, cy) = self.rnn2(xs, lengths)\n else:\n hs, hy = self.rnn2(xs)\n return hs\n\n def rnn3_output(self, xs, lengths=None):\n if self.rnn_unit_type == 'lstm':\n hs, (hy, cy) = 
self.rnn3(xs, lengths)\n else:\n hs, hy = self.rnn3(xs)\n return hs\n\n def extract_token_features(self, us, bs, es, fs):\n return super().extract_features(us, bs, es, fs)\n\n def extract_chunk_features(self, cs):\n xs = []\n for c in cs:\n xe = self.chunk_embed(c) if c.byte().any() else None\n\n if c is not None and self.pretrained_chunk_embed is not None:\n if self.pretrained_embed_usage == ModelUsage.ADD:\n pce = self.pretrained_chunk_embed(c)\n xe = xe + pce\n elif self.pretrained_embed_usage == ModelUsage.CONCAT:\n pce = self.pretrained_chunk_embed(c)\n xe = F.concat((xe, pce), 1)\n xs.append(xe)\n return xs\n\n def extract_subword_features(self, sws):\n xs = []\n for sw in sws:\n xe = self.subword_embed(sw) if sw.byte().any() else None\n\n if sw is not None and self.pretrained_subword_embed is not None:\n if self.pretrained_embed_usage == ModelUsage.ADD:\n pswe = self.pretrained_subword_embed(sw)\n xe = xe + pswe\n elif self.pretrained_embed_usage == ModelUsage.CONCAT:\n pswe = self.pretrained_subword_embed(sw)\n xe = F.concat((xe, pswe), 1)\n xs.append(xe)\n return xs\n\n def act_and_merge_chunk_features(self,\n xs,\n ws,\n vs,\n ms,\n gcs=None,\n lengths=None,\n get_att_score=False):\n hs = []\n pcs = []\n ass = [] # attention scores\n\n device = xs.device\n batch_first = self.rnn_batch_first\n closs = torch.tensor(0, dtype=torch.float, device=device)\n\n if gcs is None:\n gcs = [None] * len(xs)\n for x, w, v, gc, mask, l in zip(xs, ws, vs, gcs, ms, lengths):\n x, v, gc = self.trim_features_by_length(x, v, gc, l)\n\n if w is None and v is None: # no words were found for validation/test data\n a = torch.zeros((len(x), self.chunk_embed_out_size),\n dtype=torch.float,\n device=device)\n pc = torch.zeros(len(x), dtype=int, device=device)\n pcs.append(pc)\n h = torch.cat((x, a), dim=1) # (n, dt) @ (n, dc) => (n, dt+dc)\n hs.append(h)\n continue\n\n if w is not None:\n w = F.dropout(w, p=self.embed_dropout)\n mask_ij = mask[0]\n cl, wl = mask_ij.size()\n w = w[:wl, :]\n\n # calculate weight for w\n\n mask_ij = mask[0]\n if self.use_chunk_attention: # wavg or wcon\n mask_i = mask[1]\n\n w_scores = self.biaffine(\n F.dropout(x, p=self.biaffine_dropout),\n F.dropout(w, p=self.biaffine_dropout)) # (n, m)\n w_scores = w_scores + mask_ij # a masked element becomes 0 after softmax operation\n\n w_weight = F.softmax(w_scores,\n dim=1) # sum(rows[i], cols) == 1\n w_weight = w_weight * mask_i # raw of char w/o no candidate words become a 0 vector\n # print('ww', w_weight.shape, '\\n', w_weight)\n\n elif self.chunk_pooling_type == constants.AVG:\n w_weight = self.normalize(mask_ij)\n\n if not self.use_chunk_concat and self.chunk_vector_dropout > 0:\n mask_drop = torch.ones(w_weight.shape,\n dtype=torch.float,\n device=device)\n for i in range(w_weight.shape[0]):\n if self.chunk_vector_dropout > np.random.rand():\n mask_drop[i] = torch.zeros(w_weight.shape[1],\n dtype=torch.float,\n device=device)\n w_weight *= mask_drop\n\n # calculate weight for v\n\n if self.use_chunk_concat:\n mask_ik = mask[2]\n n = x.shape[0]\n wd = self.chunk_embed_size_merged # w.shape[1]\n if self.chunk_pooling_type == constants.WCON:\n ikj_table = mask[3]\n v_weight0 = torch.cat(\n [\n torch.unsqueeze( # (n, m) -> (n, k)\n w_weight[i][ikj_table[i]],\n dim=0) for i in range(n)\n ],\n dim=0)\n v_weight0 *= mask_ik\n\n else:\n v_weight0 = mask_ik\n\n v_weight = torch.transpose(v_weight0, 1, 0) # (n,k) -> (k,n)\n v_weight = torch.unsqueeze(v_weight, 2) # (k,n) -> (k,n,1)\n v_weight = v_weight.repeat((1, 1, wd)) # (k,n,1) -> 
(k,n,wd)\n v_weight = torch.transpose(v_weight, 1, 0).reshape(\n (v_weight.size(1), -1)) # (k,n,wd) -> (n,k*wd)\n\n if self.chunk_vector_dropout > 0:\n mask_drop = torch.ones(v_weight.shape,\n dtype=torch.float,\n device=device)\n for i in range(v_weight.shape[0]):\n if self.chunk_vector_dropout > np.random.rand():\n mask_drop[i] = torch.zeros(v_weight.shape[1],\n dtype=torch.float,\n device=device)\n v_weight *= mask_drop\n\n # calculate summary vector a\n\n if self.use_chunk_average: # avg or wavg\n a = torch.matmul(w_weight, w) # (n, m) * (m, dc) => (n, dc)\n\n else: # con or wcon\n v = torch.transpose(v, 1, 0).reshape((v.size(1), -1))\n a = v * v_weight\n\n # get predicted (attended) chunks\n if self.use_chunk_attention: # wavg or wcon\n if self.chunk_pooling_type == constants.WAVG:\n weight = w_weight\n else:\n weight = v_weight0\n\n pc = torch.argmax(weight, dim=1).data.cpu().numpy()\n pcs.append(pc)\n\n if get_att_score:\n ascore = torch.argmax(weight, dim=1).data.cpu().numpy()\n ass.append(ascore)\n\n h = torch.cat((x, a), dim=1) # (n, dt) @ (n, dc) => (n, dt+dc)\n\n hs.append(h)\n\n hs = pad_sequence(hs, batch_first=batch_first)\n\n if closs.data == 0:\n closs = None\n else:\n closs /= len(xs)\n\n if get_att_score:\n return closs, pcs, hs, ass\n else:\n return closs, pcs, hs\n\n def act_and_merge_subword_features(self,\n xs,\n cws,\n cvs,\n qs,\n gsws=None,\n lengths=None,\n get_att_score=False):\n hs = []\n psws = []\n ass = [] # attention scores\n\n device = xs.device\n batch_first = self.rnn_batch_first\n swloss = torch.tensor(0, dtype=torch.float, device=device)\n\n if gsws is None:\n gsws = [None] * len(xs)\n for x, sw, cv, gsw, mask, l in zip(xs, cws, cvs, gsws, qs, lengths):\n x, cv, gsw = self.trim_features_by_length(x, cv, gsw, l)\n\n if sw is None and cv is None: # no sws were found for validation/test data\n a = torch.zeros((len(x), self.subword_embed_out_size),\n dtype=torch.float,\n device=device)\n psw = torch.zeros(len(x), dtype=int, device=device)\n psws.append(psw)\n h = torch.cat((x, a), dim=1) # (n, dt) @ (n, dc) => (n, dt+dc)\n hs.append(h)\n continue\n\n if sw is not None:\n sw = F.dropout(sw, p=self.embed_dropout)\n mask_ij = mask[0]\n cl, swl = mask_ij.size()\n sw = sw[:swl, :]\n\n # calculate weight for subword\n\n mask_ij = mask[0]\n if self.use_subword_attention: # swavg or swcon\n mask_i = mask[1]\n\n sw_scores = self.biaffine(\n F.dropout(x, p=self.biaffine_dropout),\n F.dropout(sw, p=self.biaffine_dropout)) # (n, m)\n sw_scores = sw_scores + mask_ij # a masked element becomes 0 after softmax operation\n\n sw_weight = F.softmax(sw_scores,\n dim=1) # sum(rows[i], cols) == 1\n sw_weight = sw_weight * mask_i # raw of char w/o no candidate sws become a 0 vector\n\n elif self.subword_pooling_type == constants.AVG:\n sw_weight = self.normalize(mask_ij)\n\n if not self.use_subword_concat and self.subword_vector_dropout > 0:\n mask_drop = torch.ones(sw_weight.shape,\n dtype=torch.float,\n device=device)\n for i in range(sw_weight.shape[0]):\n if self.subword_vector_dropout > np.random.rand():\n mask_drop[i] = torch.zeros(sw_weight.shape[1],\n dtype=torch.float,\n device=device)\n sw_weight *= mask_drop\n\n # calculate weight for v\n\n if self.use_subword_concat:\n mask_ik = mask[2]\n n = x.shape[0]\n swd = self.subword_embed_size_merged # sw.shape[1]\n if self.subword_pooling_type == constants.SWCON:\n ikj_table = mask[3]\n cv_weight0 = torch.cat(\n [\n torch.unsqueeze( # (n, m) -> (n, k)\n sw_weight[i][ikj_table[i]],\n dim=0) for i in range(n)\n ],\n dim=0)\n 
cv_weight0 *= mask_ik\n\n else:\n cv_weight0 = mask_ik\n\n cv_weight = torch.transpose(cv_weight0, 1, 0) # (n,k) -> (k,n)\n cv_weight = torch.unsqueeze(cv_weight, 2) # (k,n) -> (k,n,1)\n cv_weight = cv_weight.repeat(\n (1, 1, swd)) # (k,n,1) -> (k,n,swd)\n cv_weight = torch.transpose(cv_weight, 1, 0).reshape(\n (cv_weight.size(1), -1)) # (k,n,swd) -> (n,k*swd)\n\n if self.subword_vector_dropout > 0:\n mask_drop = torch.ones(cv_weight.shape,\n dtype=torch.float,\n device=device)\n for i in range(cv_weight.shape[0]):\n if self.subword_vector_dropout > np.random.rand():\n mask_drop[i] = torch.zeros(cv_weight.shape[1],\n dtype=torch.float,\n device=device)\n cv_weight *= mask_drop\n\n # calculate summary vector a\n\n if self.use_subword_average: # avg or swavg\n a = torch.matmul(sw_weight, sw) # (n, m) * (m, dc) => (n, dc)\n\n else: # con or swcon\n cv = torch.transpose(cv, 1, 0).reshape((cv.size(1), -1))\n a = cv * cv_weight\n\n # get predicted (attended) sws\n if self.use_subword_attention: # swavg or swcon\n if self.subword_pooling_type == constants.SWAVG:\n weight = sw_weight\n else:\n weight = cv_weight0\n\n psw = torch.argmax(weight, dim=1).data.cpu().numpy()\n psws.append(psw)\n\n if get_att_score:\n ascore = torch.argmax(weight, dim=1).data.cpu().numpy()\n ass.append(ascore)\n\n h = torch.cat((x, a), dim=1) # (n, dt) @ (n, dc) => (n, dt+dc)\n\n hs.append(h)\n\n hs = pad_sequence(hs, batch_first=batch_first)\n\n if swloss.data == 0:\n swloss = None\n else:\n swloss /= len(xs)\n\n if get_att_score:\n return swloss, psws, hs, ass\n else:\n return swloss, psws, hs\n\n def normalize(self, array):\n device = array.device\n denom = torch.sum(array, dim=1, keepdims=True)\n adjust = torch.tensor([[\n torch.tensor(1, dtype=torch.float, device=device)\n if denom.data[i][0] == 0 else torch.tensor(\n 0, dtype=torch.float, device=device)\n ] for i in range(denom.shape[0])],\n dtype=torch.float,\n device=device)\n denom = denom + adjust # avoid zero division\n return torch.div(array, denom)\n" ]
[ [ "torch.div", "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.ones", "torch.transpose", "torch.nn.Module.__init__", "torch.cat", "torch.nn.functional.dropout", "torch.zeros", "torch.nn.utils.rnn.pad_sequence", "torch.sum", "torch.unsqueeze", "torch.tensor", "torch.matmul", "torch.no_grad", "numpy.random.rand", "torch.nn.functional.concat", "torch.argmax" ] ]
ietheredge/VisionEngine
[ "271c8dcaef6eb574e9047fca436d7b13cab75d3b" ]
[ "VisionEngine/utils/eval.py" ]
[ "import tensorflow as tf\nimport math\n\n\ndef embed_images(x, model):\n outputs = [\n model.model.get_layer(\"variational_layer\").output,\n model.model.get_layer(\"variational_layer_1\").output,\n model.model.get_layer(\"variational_layer_2\").output,\n model.model.get_layer(\"variational_layer_3\").output,\n ]\n encoder = tf.keras.Model(model.model.inputs, outputs)\n return encoder.predict(x)\n\n\ndef reconstruct_images(x, model):\n return model.model.predict(x)\n\n\nclass LikeLihoodLayer(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(LikeLihoodLayer, self).__init__(**kwargs)\n self.model_input_shape = [256, 256, 3]\n\n def build(self, input_shape):\n super(LikeLihoodLayer, self).build(input_shape)\n\n def call(self, layer_inputs, **kwargs):\n inputs, outputs = layer_inputs\n mse = -tf.losses.mean_squared_error(inputs, outputs)\n out = 1.0 / (tf.sqrt(2.0 * math.pi)) * tf.exp(-0.5 * (mse) ** 2.0)\n return out\n\n def compute_output_shape(self, input_shape):\n return input_shape[0]\n\n def get_config(self):\n config = {}\n base_config = super(LikeLihoodLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef sample_likelihood(x, model):\n inputs = tf.keras.layers.Flatten()(model.model.input)\n outputs = tf.keras.layers.Flatten()(model.model.output)\n out = LikeLihoodLayer()([inputs, outputs])\n lh_model = tf.keras.Model(model.model.input, out)\n return lh_model.predict(x)\n" ]
[ [ "tensorflow.losses.mean_squared_error", "tensorflow.exp", "tensorflow.keras.Model", "tensorflow.sqrt", "tensorflow.keras.layers.Flatten" ] ]
mengzaiqiao/MultiObjectiveOptimization
[ "7085638b3918506836118f88fed8bb2f8994a499" ]
[ "multi_task/metrics.py" ]
[ "# Adapted from: https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/metrics.py\n\nfrom losses import l1_loss_instance\nimport numpy as np\n\nclass RunningMetric(object):\n def __init__(self, metric_type, n_classes =None):\n self._metric_type = metric_type\n if metric_type == 'ACC':\n self.accuracy = 0.0\n self.num_updates = 0.0\n if metric_type == 'L1':\n self.l1 = 0.0\n self.num_updates = 0.0\n if metric_type == 'IOU':\n if n_classes is None:\n print('ERROR: n_classes is needed for IOU')\n self.num_updates = 0.0\n self._n_classes = n_classes\n self.confusion_matrix = np.zeros((n_classes, n_classes))\n\n def reset(self):\n if self._metric_type == 'ACC':\n self.accuracy = 0.0\n self.num_updates = 0.0\n if self._metric_type == 'L1':\n self.l1 = 0.0\n self.num_updates = 0.0\n if self._metric_type == 'IOU':\n self.num_updates = 0.0\n self.confusion_matrix = np.zeros((self._n_classes, self._n_classes))\n\n def _fast_hist(self, pred, gt):\n mask = (gt >= 0) & (gt < self._n_classes)\n hist = np.bincount(\n self._n_classes * gt[mask].astype(int) +\n pred[mask], minlength=self._n_classes**2).reshape(self._n_classes, self._n_classes)\n return hist\n\n def update(self, pred, gt):\n if self._metric_type == 'ACC':\n predictions = pred.data.max(1, keepdim=True)[1]\n self.accuracy += (predictions.eq(gt.data.view_as(predictions)).cpu().sum()).item() \n self.num_updates += predictions.shape[0]\n \n if self._metric_type == 'L1':\n _gt = gt.data.cpu().numpy()\n _pred = pred.data.cpu().numpy()\n gti = _gt.astype(np.int32)\n mask = gti!=250\n if np.sum(mask) < 1:\n return\n self.l1 += np.sum( np.abs(gti[mask] - _pred.astype(np.int32)[mask]) ) \n self.num_updates += np.sum(mask)\n\n if self._metric_type == 'IOU':\n _pred = pred.data.max(1)[1].cpu().numpy()\n _gt = gt.data.cpu().numpy()\n for lt, lp in zip(_pred, _gt):\n self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten())\n \n def get_result(self):\n if self._metric_type == 'ACC':\n return {'acc': self.accuracy/self.num_updates}\n if self._metric_type == 'L1':\n return {'l1': self.l1/self.num_updates}\n if self._metric_type == 'IOU':\n acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n acc_cls = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iou = np.diag(self.confusion_matrix) / (self.confusion_matrix.sum(axis=1) + self.confusion_matrix.sum(axis=0) - np.diag(self.confusion_matrix)) \n mean_iou = np.nanmean(iou)\n return {'micro_acc': acc, 'macro_acc':acc_cls, 'mIOU': mean_iou}\n\n\ndef get_metrics(params):\n met = {}\n if 'mnist' in params['dataset']:\n for t in params['tasks']:\n met[t] = RunningMetric(metric_type = 'ACC')\n if 'cityscapes' in params['dataset']:\n if 'S' in params['tasks']:\n met['S'] = RunningMetric(metric_type = 'IOU', n_classes=19)\n if 'I' in params['tasks']:\n met['I'] = RunningMetric(metric_type = 'L1')\n if 'D' in params['tasks']:\n met['D'] = RunningMetric(metric_type = 'L1')\n if 'celeba' in params['dataset']:\n for t in params['tasks']:\n met[t] = RunningMetric(metric_type = 'ACC')\n return met" ]
[ [ "numpy.diag", "numpy.zeros", "numpy.sum", "numpy.nanmean" ] ]
wasiahmad/community_question_answering
[ "73d13bc1cdf2ea66d13209c007dcc2767cf2155c" ]
[ "senteval/examples/all.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\"\"\"\nInferSent models. See https://github.com/facebookresearch/InferSent.\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport sys\nimport os\nimport torch\nimport logging\nimport torch.nn as nn\nimport numpy as np\n\n# get models.py from InferSent repo\nfrom sent2vec import Sent2VecSingle, Sent2Vec\nfrom gensen import GenSen, GenSenSingle\nfrom elmo import ELMo\n\n\nclass All2Vec(nn.Module):\n \"\"\"Concat Gensen.\"\"\"\n\n def __init__(self, models):\n \"\"\"A wrapper class for multiple GenSen models.\"\"\"\n super(All2Vec, self).__init__()\n self.models = models\n\n def build_vocab(self, sentences, tokenize=True):\n for name, model in self.models.items():\n if name not in ['elmo']:\n model.build_vocab(sentences, tokenize=tokenize)\n\n def get_representation(\n self, sentences, batch_size,\n tokenize=False, strategy='max'\n ):\n \"\"\"Get model representations.\"\"\"\n representations = []\n for name, model in self.models.items():\n if name == 'elmo':\n embeddings = model.encode(\n sentences, bsize=batch_size,\n tokenize=tokenize\n )\n elif name == 'gensen':\n sentences = [s.lower() for s in sentences]\n _, embeddings = model.get_representation(\n sentences, pool=strategy, return_numpy=True\n )\n else:\n embeddings = model.get_representation(\n sentences, batch_size=batch_size,\n tokenize=tokenize\n )\n representations.append(embeddings)\n\n return np.concatenate(representations, axis=1)\n\n\n# Set PATHs\nPATH_SENTEVAL = '../'\nPATH_TO_DATA = '../data'\nPATH_TO_W2V = '../../glove/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2\n\n# SENT2VEC = ['../data/models/sent2vec/mtask/adv_shared_private/shared.pth',\n# '../data/models/sent2vec/mtask/adv_shared_private/quora.pth',\n# '../data/models/sent2vec/mtask/adv_shared_private/snli.pth',\n# '../data/models/sent2vec/mtask/adv_shared_private/multinli.pth']\n\nSENT2VEC = ['../data/models/sent2vec/mtask/shared_private/shared.pth',\n '../data/models/sent2vec/mtask/shared_private/quora.pth',\n '../data/models/sent2vec/mtask/shared_private/snli.pth',\n '../data/models/sent2vec/mtask/shared_private/multinli.pth']\n\nELMO_WEIGHT = '../data/models/elmo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5'\nELMO_OPTIONS = '../data/models/elmo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json'\n\nFOLDER_PATH = '../data/models/gensen/'\nPRETRAIN_EMB = '../../glove/glove.840B.300d.h5'\nPREFIX1 = 'nli_large_bothskip_parse'\nPREFIX2 = 'nli_large_bothskip'\n\nV = 1 # version of InferSent\n\nassert all([os.path.isfile(path) for path in SENT2VEC]), 'Set MODEL PATHs'\nassert os.path.isfile(ELMO_WEIGHT), 'Set MODEL PATHs'\nassert os.path.isfile(PATH_TO_W2V), 'Set GloVe PATHs'\n\n# import senteval\nsys.path.insert(0, PATH_SENTEVAL)\nimport senteval\n\n\ndef gensen_prepare(params, model, samples):\n print('Preparing task : %s ' % (params.current_task))\n vocab = set()\n for sample in samples:\n if params.current_task != 'TREC':\n sample = ' '.join(sample).lower().split()\n else:\n sample = ' '.join(sample).split()\n for word in sample:\n if word not in vocab:\n vocab.add(word)\n\n vocab.add('<s>')\n vocab.add('<pad>')\n vocab.add('<unk>')\n vocab.add('</s>')\n # If you want to turn off vocab expansion just comment out the below line.\n model.vocab_expansion(vocab)\n\n\ndef prepare(params, samples):\n for name, model in 
params['model'].models.items():\n if name == 'gensen':\n gensen_prepare(params, model, samples)\n elif name == 'elmo':\n continue\n else:\n model.build_vocab([' '.join(s) for s in samples], tokenize=False)\n\n\ndef batcher(params, batch):\n sentences = [' '.join(s) for s in batch]\n\n # batch contains list of words\n max_tasks = ['MR', 'CR', 'SUBJ', 'MPQA', 'ImageCaptionRetrieval']\n if params.current_task in max_tasks:\n strategy = 'max'\n else:\n strategy = 'last'\n\n embeddings = params['model'].get_representation(\n sentences, params.batch_size,\n tokenize=False, strategy=strategy\n )\n return embeddings\n\n\n\"\"\"\nEvaluation of trained model on Transfer Tasks (SentEval)\n\"\"\"\n\n# define senteval params\nparams_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}\nparams_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,\n 'tenacity': 5, 'epoch_size': 4}\n# Set up logger\nlogging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)\n\nif __name__ == \"__main__\":\n params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,\n 'pool_type': 'max', 'dpout_model': 0.0, 'version': V}\n\n s2vsingle = [\n Sent2VecSingle(params_model)\n for _ in range(len(SENT2VEC))\n ]\n\n for i in range(len(SENT2VEC)):\n s2vsingle[i].load_state(SENT2VEC[i])\n s2vsingle[i].set_w2v_path(PATH_TO_W2V)\n s2vsingle[i] = s2vsingle[i].cuda()\n\n sent2vec = Sent2Vec(s2vsingle, 'concat')\n\n params_model = {'bsize': 64, 'pool_type': 'mean',\n 'which_layer': 'all',\n 'optfile': ELMO_OPTIONS,\n 'wgtfile': ELMO_WEIGHT}\n\n elmo = ELMo(params_model)\n elmo = elmo.cuda()\n\n gensen_1 = GenSenSingle(\n model_folder=FOLDER_PATH,\n filename_prefix=PREFIX1,\n pretrained_emb=PRETRAIN_EMB,\n cuda=True\n )\n gensen_2 = GenSenSingle(\n model_folder=FOLDER_PATH,\n filename_prefix=PREFIX2,\n pretrained_emb=PRETRAIN_EMB,\n cuda=True\n )\n gensen = GenSen(gensen_1, gensen_2)\n\n models = {\n 'sent2vec': sent2vec,\n 'elmo': elmo,\n 'gensen': gensen\n }\n\n # models = {\n # 'elmo': elmo,\n # 'sent2vec': sent2vec\n # }\n\n # models = {\n # 'elmo': elmo,\n # 'gensen': gensen\n # }\n\n all2vec = All2Vec(models)\n params_senteval['model'] = all2vec.cuda()\n\n se = senteval.engine.SE(params_senteval, batcher, prepare)\n transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',\n 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',\n 'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',\n 'Length', 'WordContent', 'Depth', 'TopConstituents',\n 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',\n 'OddManOut', 'CoordinationInversion']\n results_transfer = se.eval(transfer_tasks)\n\n print('--------------------------------------------')\n print('MR [Dev:%.1f/Test:%.1f]' % (results_transfer['MR']['devacc'], results_transfer['MR']['acc']))\n print('CR [Dev:%.1f/Test:%.1f]' % (results_transfer['CR']['devacc'], results_transfer['CR']['acc']))\n print('SUBJ [Dev:%.1f/Test:%.1f]' % (\n results_transfer['SUBJ']['devacc'], results_transfer['SUBJ']['acc']))\n print('MPQA [Dev:%.1f/Test:%.1f]' % (\n results_transfer['MPQA']['devacc'], results_transfer['MPQA']['acc']))\n print('SST2 [Dev:%.1f/Test:%.1f]' % (\n results_transfer['SST2']['devacc'], results_transfer['SST2']['acc']))\n print('SST5 [Dev:%.1f/Test:%.1f]' % (\n results_transfer['SST5']['devacc'], results_transfer['SST5']['acc']))\n print('TREC [Dev:%.1f/Test:%.1f]' % (\n results_transfer['TREC']['devacc'], results_transfer['TREC']['acc']))\n print('MRPC [Dev:%.1f/TestAcc:%.1f/TestF1:%.1f]' % (\n 
results_transfer['MRPC']['devacc'], results_transfer['MRPC']['acc'], results_transfer['MRPC']['f1']))\n print('SICKRelatedness [Dev:%.3f/Test:%.3f]' % (\n results_transfer['SICKRelatedness']['devpearson'], results_transfer['SICKRelatedness']['pearson']))\n print('SICKEntailment [Dev:%.1f/Test:%.1f]' % (\n results_transfer['SICKEntailment']['devacc'], results_transfer['SICKEntailment']['acc']))\n print('--------------------------------------------')\n print('STS12 [Pearson:%.3f/Spearman:%.3f]' % (\n results_transfer['STS12']['all']['pearson']['mean'], results_transfer['STS12']['all']['spearman']['mean']))\n print('STS13 [Pearson:%.3f/Spearman:%.3f]' % (\n results_transfer['STS13']['all']['pearson']['mean'], results_transfer['STS13']['all']['spearman']['mean']))\n print('STS14 [Pearson:%.3f/Spearman:%.3f]' % (\n results_transfer['STS14']['all']['pearson']['mean'], results_transfer['STS14']['all']['spearman']['mean']))\n print('STS15 [Pearson:%.3f/Spearman:%.3f]' % (\n results_transfer['STS15']['all']['pearson']['mean'], results_transfer['STS15']['all']['spearman']['mean']))\n print('STS16 [Pearson:%.3f/Spearman:%.3f]' % (\n results_transfer['STS16']['all']['pearson']['mean'], results_transfer['STS16']['all']['spearman']['mean']))\n print('STSBenchmark [Dev:%.5f/Pearson:%.5f/Spearman:%.5f]' % (\n results_transfer['STSBenchmark']['devpearson'], results_transfer['STSBenchmark']['pearson'],\n results_transfer['STSBenchmark']['spearman']))\n print('--------------------------------------------')\n print('Length [Dev:%.2f/Test:%.2f]' % (\n results_transfer['Length']['devacc'], results_transfer['Length']['acc']))\n print('WordContent [Dev:%.2f/Test:%.2f]' % (\n results_transfer['WordContent']['devacc'], results_transfer['WordContent']['acc']))\n print('Depth [Dev:%.2f/Test:%.2f]' % (\n results_transfer['Depth']['devacc'], results_transfer['Depth']['acc']))\n print('TopConstituents [Dev:%.2f/Test:%.2f]' % (\n results_transfer['TopConstituents']['devacc'], results_transfer['TopConstituents']['acc']))\n print('BigramShift [Dev:%.2f/Test:%.2f]' % (\n results_transfer['BigramShift']['devacc'], results_transfer['BigramShift']['acc']))\n print('Tense [Dev:%.2f/Test:%.2f]' % (\n results_transfer['Tense']['devacc'], results_transfer['Tense']['acc']))\n print('SubjNumber [Dev:%.2f/Test:%.2f]' % (\n results_transfer['SubjNumber']['devacc'], results_transfer['SubjNumber']['acc']))\n print('ObjNumber [Dev:%.2f/Test:%.2f]' % (\n results_transfer['ObjNumber']['devacc'], results_transfer['ObjNumber']['acc']))\n print('OddManOut [Dev:%.2f/Test:%.2f]' % (\n results_transfer['OddManOut']['devacc'], results_transfer['OddManOut']['acc']))\n print('CoordInversion [Dev:%.2f/Test:%.2f]' % (\n results_transfer['CoordinationInversion']['devacc'], results_transfer['CoordinationInversion']['acc']))\n print('--------------------------------------------')\n" ]
[ [ "numpy.concatenate" ] ]
gan3sh500/keras-YOLOv3-model-set
[ "1a1108c52073c01c130618dc68607f455adadf28" ]
[ "common/utils.py" ]
[ "#!/usr/bin/python3\n# -*- coding=utf-8 -*-\n\"\"\"Miscellaneous utility functions.\"\"\"\n\nfrom PIL import Image\nimport numpy as np\nimport os, cv2, colorsys\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\nfrom common.backbones.efficientnet import swish\nfrom common.backbones.mobilenet_v3 import hard_sigmoid, hard_swish\nfrom yolo4.models.layers import mish\nimport tensorflow as tf\n\n\ndef optimize_tf_gpu(tf, K):\n if tf.__version__.startswith('2'):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_virtual_device_configuration(gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=10000)])\n #tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n else:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True #dynamic alloc GPU resource\n config.gpu_options.per_process_gpu_memory_fraction = 0.9 #GPU memory threshold 0.3\n session = tf.Session(config=config)\n\n # set session\n K.set_session(session)\n\n\ndef get_custom_objects():\n '''\n form up a custom_objects dict so that the customized\n layer/function call could be correctly parsed when keras\n .h5 model is loading or converting\n '''\n custom_objects_dict = {\n 'tf': tf,\n 'swish': swish,\n 'hard_sigmoid': hard_sigmoid,\n 'hard_swish': hard_swish,\n 'mish': mish\n }\n\n return custom_objects_dict\n\n\ndef get_multiscale_list():\n input_shape_list = [(320,320), (352,352), (384,384), (416,416), (448,448), (480,480), (512,512), (544,544), (576,576), (608,608)]\n\n return input_shape_list\n\n\ndef resize_anchors(base_anchors, target_shape, base_shape=(416,416)):\n '''\n original anchor size is clustered from COCO dataset\n under input shape (416,416). 
We need to resize it to\n our train input shape for better performance\n '''\n return np.around(base_anchors*target_shape[::-1]/base_shape[::-1])\n\n\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\ndef get_colors(class_names):\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(class_names), 1., 1.)\n for x in range(len(class_names))]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n return colors\n\ndef get_dataset(annotation_file, shuffle=True):\n with open(annotation_file) as f:\n lines = f.readlines()\n lines = [line.strip() for line in lines]\n\n if shuffle:\n np.random.seed(10101)\n np.random.shuffle(lines)\n np.random.seed(None)\n\n return lines\n\ndef draw_label(image, text, color, coords):\n font = cv2.FONT_HERSHEY_PLAIN\n font_scale = 1.\n (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]\n\n padding = 5\n rect_height = text_height + padding * 2\n rect_width = text_width + padding * 2\n\n (x, y) = coords\n\n cv2.rectangle(image, (x, y), (x + rect_width, y - rect_height), color, cv2.FILLED)\n cv2.putText(image, text, (x + padding, y - text_height + padding), font,\n fontScale=font_scale,\n color=(255, 255, 255),\n lineType=cv2.LINE_AA)\n\n return image\n\ndef draw_boxes(image, boxes, classes, scores, class_names, colors, show_score=True):\n if classes is None or len(classes) == 0:\n return image\n\n for box, cls, score in zip(boxes, classes, scores):\n xmin, ymin, xmax, ymax = box\n\n class_name = class_names[cls]\n if show_score:\n label = '{} {:.2f}'.format(class_name, score)\n else:\n label = '{}'.format(class_name)\n #print(label, (xmin, ymin), (xmax, ymax))\n\n # if no color info, use black(0,0,0)\n if colors == None:\n color = (0,0,0)\n else:\n color = colors[cls]\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 1, cv2.LINE_AA)\n image = draw_label(image, label, color, (xmin, ymin))\n\n return image\n\n" ]
[ [ "numpy.random.seed", "numpy.around", "tensorflow.config.experimental.list_physical_devices", "numpy.random.shuffle", "tensorflow.config.experimental.VirtualDeviceConfiguration", "tensorflow.ConfigProto", "tensorflow.Session", "numpy.array", "tensorflow.__version__.startswith" ] ]
werlang/Gas-Station-BinanceSmartChain
[ "4462dfcae2407d4785e333bdc88b0486e1095896" ]
[ "gasPriceApi.py" ]
[ "import time\nimport sys\nimport json\nimport math\nimport traceback\nimport os\nimport pandas as pd\nimport numpy as np\nfrom web3 import Web3, HTTPProvider\nfrom web3.middleware import geth_poa_middleware\n## newly added packages for api\nimport click\nimport logging\nfrom threading import Thread\nfrom sanic import Sanic, response\nfrom retry import retry\n\nweb3 = Web3(HTTPProvider('https://bsc-dataseed.binance.org'))\n### These are the threholds used for % blocks accepting to define the recommended gas prices. can be edited here if desired\n\nweb3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\napp = Sanic(name='bsc_gas_oracle')\nlog = logging.getLogger('sanic.error')\napp.config.LOGO = ''\nstats = {}\n\nSAFELOW = 35\nSTANDARD = 60\nFAST = 90\n\nclass Timers():\n \"\"\"\n class to keep track of time relative to network block\n \"\"\"\n def __init__(self, start_block):\n self.start_block = start_block\n self.current_block = start_block\n self.process_block = start_block\n\n def update_time(self, block):\n self.current_block = block\n self.process_block = self.process_block + 1\n \n \nclass CleanTx():\n \"\"\"transaction object / methods for pandas\"\"\"\n def __init__(self, tx_obj):\n self.hash = tx_obj.hash\n self.block_mined = tx_obj.blockNumber\n self.gas_price = tx_obj['gasPrice']\n self.round_gp_10gwei()\n \n def to_dataframe(self):\n data = {self.hash: {'block_mined':self.block_mined, 'gas_price':self.gas_price, 'round_gp_10gwei':self.gp_10gwei}}\n return pd.DataFrame.from_dict(data, orient='index')\n\n def round_gp_10gwei(self):\n \"\"\"Rounds the gas price to gwei\"\"\"\n gp = self.gas_price/1e8\n # if gp >= 1 and gp < 10:\n # gp = np.floor(gp)\n # elif gp >= 10:\n # gp = gp/10\n # gp = np.floor(gp)\n # gp = gp*10\n # else:\n # gp = 0\n self.gp_10gwei = gp\n\nclass CleanBlock():\n \"\"\"block object/methods for pandas\"\"\"\n def __init__(self, block_obj, timemined, mingasprice=None):\n self.block_number = block_obj.number \n self.time_mined = timemined \n self.blockhash = block_obj.hash\n self.mingasprice = mingasprice\n \n def to_dataframe(self):\n data = {0:{'block_number':self.block_number, 'blockhash':self.blockhash, 'time_mined':self.time_mined, 'mingasprice':self.mingasprice}}\n return pd.DataFrame.from_dict(data, orient='index')\n\ndef write_to_json(gprecs, prediction_table):\n \"\"\"write json data\"\"\"\n try:\n prediction_table['gasprice'] = prediction_table['gasprice']/10\n prediction_tableout = prediction_table.to_json(orient='records')\n filepath_gprecs = 'ethgasAPI.json'\n filepath_prediction_table = 'predictTable.json'\n \n with open(filepath_gprecs, 'w') as outfile:\n json.dump(gprecs, outfile)\n\n with open(filepath_prediction_table, 'w') as outfile:\n outfile.write(prediction_tableout)\n\n except Exception as e:\n print(e)\n\ndef process_block_transactions(block):\n \"\"\"get tx data from block\"\"\"\n block_df = pd.DataFrame()\n block_obj = web3.eth.getBlock(block, True)\n for transaction in block_obj.transactions:\n clean_tx = CleanTx(transaction)\n block_df = block_df.append(clean_tx.to_dataframe(), ignore_index = False)\n block_df['time_mined'] = block_obj.timestamp\n return(block_df, block_obj)\n\ndef process_block_data(block_df, block_obj):\n \"\"\"process block to dataframe\"\"\"\n if len(block_obj.transactions) > 0:\n x = block_df['round_gp_10gwei']\n masked_a = np.ma.masked_array(x, mask=x==0)\n block_mingasprice = masked_a.min()\n # print('------------------block_mingasprice------------------------')\n # print(masked_a)\n # 
print('-----------------------------------------------------------')\n # print(block_df['round_gp_10gwei'])\n # print('-----------------------------------------------------------')\n # print(block_mingasprice)\n # print('-----------------------------------------------------------')\n else:\n block_mingasprice = np.nan\n \n timemined = block_df['time_mined'].min()\n # print('------------------timemined------------------------')\n # print(block_df['time_mined'])\n # print('-----------------------------------------------------------')\n # print(timemined)\n clean_block = CleanBlock(block_obj, timemined, block_mingasprice)\n return(clean_block.to_dataframe())\n\ndef get_hpa(gasprice, hashpower):\n \"\"\"gets the hash power accpeting the gas price over last 200 blocks\"\"\"\n hpa = hashpower.loc[gasprice >= hashpower.index, 'hashp_pct']\n if gasprice > hashpower.index.max():\n hpa = 100\n elif gasprice < hashpower.index.min():\n hpa = 0\n else:\n hpa = hpa.max()\n return int(hpa)\n\ndef analyze_last200blocks(block, blockdata):\n recent_blocks = blockdata.loc[blockdata['block_number'] > (block-200), ['mingasprice', 'block_number', 'time_mined']]\n #create hashpower accepting dataframe based on mingasprice accepted in block\n hashpower = recent_blocks.groupby('mingasprice').count()\n hashpower = hashpower.rename(columns={'block_number': 'count'})\n hashpower['cum_blocks'] = hashpower['count'].cumsum()\n totalblocks = hashpower['count'].sum()\n hashpower['hashp_pct'] = hashpower['cum_blocks']/totalblocks*100\n #get avg blockinterval time\n blockinterval = recent_blocks.sort_values('block_number').diff()\n blockinterval.loc[blockinterval['block_number'] > 1, 'time_mined'] = np.nan\n blockinterval.loc[blockinterval['time_mined'] < 0, 'time_mined'] = np.nan\n avg_timemined = blockinterval['time_mined'].mean()\n if np.isnan(avg_timemined):\n avg_timemined = 15\n return(hashpower, avg_timemined)\n\n\ndef make_predictTable(block, alltx, hashpower, avg_timemined):\n\n #predictiontable\n predictTable = pd.DataFrame({'gasprice' : range(100, 1010, 10)})\n ptable2 = pd.DataFrame({'gasprice' : range(0, 100, 1)})\n predictTable = predictTable.append(ptable2).reset_index(drop=True)\n predictTable = predictTable.sort_values('gasprice').reset_index(drop=True)\n predictTable['hashpower_accepting'] = predictTable['gasprice'].apply(get_hpa, args=(hashpower,))\n return(predictTable)\n\ndef get_gasprice_recs(prediction_table, block_time, block):\n \n def get_safelow():\n series = prediction_table.loc[prediction_table['hashpower_accepting'] >= SAFELOW, 'gasprice']\n safelow = series.min()\n return float(safelow)\n\n def get_average():\n series = prediction_table.loc[prediction_table['hashpower_accepting'] >= STANDARD, 'gasprice']\n average = series.min()\n return float(average)\n\n def get_fast():\n series = prediction_table.loc[prediction_table['hashpower_accepting'] >= FAST, 'gasprice']\n fastest = series.min()\n return float(fastest)\n\n def get_fastest():\n hpmax = prediction_table['hashpower_accepting'].max()\n fastest = prediction_table.loc[prediction_table['hashpower_accepting'] == hpmax, 'gasprice'].values[0]\n return float(fastest) \n \n gprecs = {}\n gprecs['safeLow'] = get_safelow()/10\n gprecs['standard'] = get_average()/10\n gprecs['fast'] = get_fast()/10\n gprecs['fastest'] = get_fastest()/10\n gprecs['block_time'] = block_time\n gprecs['blockNum'] = block\n return(gprecs)\n\n@retry(Exception, delay=1, logger=log)\ndef master_control():\n\n def init (block):\n nonlocal alltx\n nonlocal blockdata\n 
print(\"\\n\\n**** ETH Gas Station Express Oracle ****\")\n print (\"\\nSafelow = \" +str(SAFELOW)+ \"% of blocks accepting. Usually confirms in less than 30min.\")\n print (\"Standard= \" +str(STANDARD)+ \"% of blocks accepting. Usually confirms in less than 5 min.\")\n print (\"Fast = \" +str(FAST)+ \"% of blocks accepting. Usually confirms in less than 1 minute\")\n print (\"Fastest = all blocks accepting. As fast as possible but you are probably overpaying.\")\n print(\"\\nnow loading gasprice data from last 50 blocks...give me a minute\")\n\n # start a little early with 50 past blocks\n for pastblock in range((block-50), (block), 1):\n (mined_blockdf, block_obj) = process_block_transactions(pastblock)\n alltx = alltx.combine_first(mined_blockdf)\n block_sumdf = process_block_data(mined_blockdf, block_obj)\n blockdata = blockdata.append(block_sumdf, ignore_index = True)\n print (\"done. now reporting gasprice recs in gwei: \\n\")\n \n print (\"\\npress ctrl-c at any time to stop monitoring\\n\")\n print (\"**** And the oracle says...**** \\n\")\n \n\n \n def append_new_tx(clean_tx):\n nonlocal alltx\n if not clean_tx.hash in alltx.index:\n alltx = alltx.append(clean_tx.to_dataframe(), ignore_index = False)\n \n def update_dataframes(block):\n global stats\n nonlocal alltx\n nonlocal blockdata\n nonlocal timer\n\n try:\n #get minedtransactions and blockdata from previous block\n mined_block_num = block-3\n (mined_blockdf, block_obj) = process_block_transactions(mined_block_num)\n\n alltx = alltx.combine_first(mined_blockdf)\n\n\n #process block data\n block_sumdf = process_block_data(mined_blockdf, block_obj)\n\n #add block data to block dataframe \n blockdata = blockdata.append(block_sumdf, ignore_index = True)\n\n # slice blockdata to 200 last blocks to avoid growing memory consumption\n blockdata = blockdata.iloc[-200:,:]\n # with open('blockdata.json', 'w') as outfile:\n # import json\n # j = blockdata.to_json(orient='split')\n # p = json.loads(j)\n # t = json.dumps(p)\n # outfile.write(str(t))\n\n\n #get hashpower table from last 200 blocks\n (hashpower, block_time) = analyze_last200blocks(block, blockdata)\n predictiondf = make_predictTable(block, alltx, hashpower, block_time)\n\n #get gpRecs\n stats = get_gasprice_recs (predictiondf, block_time, block)\n print(stats)\n\n #every block, write gprecs, predictions \n write_to_json(stats, predictiondf)\n return True\n\n except: \n print(traceback.format_exc())\n\n alltx = pd.DataFrame()\n blockdata = pd.DataFrame()\n timer = Timers(web3.eth.blockNumber) \n start_time = time.time()\n init (web3.eth.blockNumber)\n \n while True:\n try:\n block = web3.eth.blockNumber\n if (timer.process_block < block):\n updated = update_dataframes(timer.process_block)\n timer.process_block = timer.process_block + 1\n else:\n # rest a little more if there are no blocks available\n time.sleep(10)\n except:\n pass\n\n time.sleep(1)\n\[email protected]('/')\nasync def api(request):\n if len(stats) == 0:\n import json\n with open('ethgasAPI.json') as file:\n data = json.load(file)\n return response.json(data)\n return response.json(stats)\n \n\[email protected]('/health')\nasync def health(request):\n return response.json({'health': stats['health']}, status=200 if stats['health'] else 503)\n\[email protected]()\[email protected]('--host', '-h', default='127.0.0.1')\[email protected]('--port', '-p', default=8097)\n\ndef main(host, port):\n bg = Thread(target=master_control, args=())\n bg.daemon = True\n bg.start()\n app.run(host=host, port=port, 
access_log=False)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.isnan", "numpy.ma.masked_array", "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
Storiesbyharshit/Deep-Learning
[ "2464e3354c8c2c99237d76b47264d509cc1a7cef" ]
[ "Deep Learning using Tensorflow Keras/Churn-Model-using-ANN/ANN - Churn Modelling.py" ]
[ "\r\n\r\n# Importing the libraries \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n\r\n\r\n\r\n\r\n\r\n# Importing the datasetm\r\ndataset = pd.read_csv(r'C:\\Users\\IDEAPAD 320\\Desktop\\datasets\\Churn_Modelling.csv')\r\nX = dataset.iloc[:, 3:13].values\r\ny = dataset.iloc[:, 13].values\r\n\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n\r\nfrom sklearn.compose import ColumnTransformer\r\n\r\n\r\n\r\nlabelencoder_X_2 = LabelEncoder()\r\n\r\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\r\n\r\n\r\n\r\ncolumnTransformer = ColumnTransformer([('encoder', OneHotEncoder(), [1])], remainder='passthrough')\r\n\r\nX = np.array(columnTransformer.fit_transform(X), dtype = np.str)\r\n\r\nX = X[:, 1:]\r\n\r\n\r\n\r\n\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\r\n\r\n\r\n\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler() \r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test) \r\n\r\n\r\n\r\n\r\n\r\n# Importing the Keras libraries and packages\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Dropout\r\n\r\n\r\n\r\n\r\n\r\n# Initialising the ANN\r\nclassifier = Sequential()\r\n\r\n# Adding the input layer and the first hidden layer\r\nclassifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))\r\n\r\n\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))\r\n\r\n\r\n# Adding the output layer\r\nclassifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))\r\n\r\n# Compiling the ANN\r\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\r\n\r\n\r\n\r\n\r\n\r\n# Fitting the ANN to the Training set\r\nclassifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100)\r\n\r\n\r\n\r\n\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\ny_pred = (y_pred > 0.5)\r\n\r\n\r\n\r\n\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncorrect_prediction = cm[0][0]+cm[1][1]\r\nwrong_prediction = cm[0][1]+cm[1][0]\r\ntotal = correct_prediction + wrong_prediction\r\n\r\n\r\n\r\n\r\n\r\ncorrect_prediction\r\n\r\n\r\n\r\n\r\nwrong_prediction\r\n\r\n\r\n\r\n\r\n\r\naccuracy = (correct_prediction/total)*100\r\n\r\n\r\n\r\n\r\n\r\naccuracy\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "pandas.read_csv", "sklearn.preprocessing.OneHotEncoder", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder" ] ]
mariaborbones/AIF360
[ "05b3d155aa89a1b173ac3f1ec42110a899710cf8" ]
[ "aif360/datasets/standard_dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom logging import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom aif360.datasets import BinaryLabelDataset\n\n\nclass StandardDataset(BinaryLabelDataset):\n \"\"\"Base class for every :obj:`BinaryLabelDataset` provided out of the box by\n aif360.\n\n It is not strictly necessary to inherit this class when adding custom\n datasets but it may be useful.\n\n This class is very loosely based on code from\n https://github.com/algofairness/fairness-comparison.\n \"\"\"\n\n def __init__(self, df, label_name, favorable_classes,\n protected_attribute_names, privileged_classes,\n instance_weights_name='', categorical_features=[],\n features_to_keep=[], features_to_drop=[], na_values=[],\n custom_preprocessing=None, metadata=None):\n \"\"\"\n Subclasses of StandardDataset should perform the following before\n calling `super().__init__`:\n\n 1. Load the dataframe from a raw file.\n\n Then, this class will go through a standard preprocessing routine which:\n\n 2. (optional) Performs some dataset-specific preprocessing (e.g.\n renaming columns/values, handling missing data).\n\n 3. Drops unrequested columns (see `features_to_keep` and\n `features_to_drop` for details).\n\n 4. Drops rows with NA values.\n\n 5. Creates a one-hot encoding of the categorical variables.\n\n 6. Maps protected attributes to binary privileged/unprivileged\n values (1/0).\n\n 7. Maps labels to binary favorable/unfavorable labels (1/0).\n\n Args:\n df (pandas.DataFrame): DataFrame on which to perform standard\n processing.\n label_name: Name of the label column in `df`.\n favorable_classes (list or function): Label values which are\n considered favorable or a boolean function which returns `True`\n if favorable. All others are unfavorable. Label values are\n mapped to 1 (favorable) and 0 (unfavorable) if they are not\n already binary and numerical.\n protected_attribute_names (list): List of names corresponding to\n protected attribute columns in `df`.\n privileged_classes (list(list or function)): Each element is\n a list of values which are considered privileged or a boolean\n function which return `True` if privileged for the corresponding\n column in `protected_attribute_names`. All others are\n unprivileged. Values are mapped to 1 (privileged) and 0\n (unprivileged) if they are not already numerical.\n instance_weights_name (optional): Name of the instance weights\n column in `df`.\n categorical_features (optional, list): List of column names in the\n DataFrame which are to be expanded into one-hot vectors.\n features_to_keep (optional, list): Column names to keep. All others\n are dropped except those present in `protected_attribute_names`,\n `categorical_features`, `label_name` or `instance_weights_name`.\n Defaults to all columns if not provided.\n features_to_drop (optional, list): Column names to drop. *Note: this\n overrides* `features_to_keep`.\n na_values (optional): Additional strings to recognize as NA. See\n :func:`pandas.read_csv` for details.\n custom_preprocessing (function): A function object which\n acts on and returns a DataFrame (f: DataFrame -> DataFrame). If\n `None`, no extra preprocessing is applied.\n metadata (optional): Additional metadata to append.\n \"\"\"\n # 2. Perform dataset-specific preprocessing\n if custom_preprocessing:\n df = custom_preprocessing(df)\n\n # 3. 
Drop unrequested columns\n features_to_keep = features_to_keep or df.columns.tolist()\n keep = (set(features_to_keep) | set(protected_attribute_names)\n | set(categorical_features) | set([label_name]))\n if instance_weights_name:\n keep |= set([instance_weights_name])\n df = df[sorted(keep - set(features_to_drop), key=df.columns.get_loc)]\n\n # 4. Remove any rows that have missing data.\n dropped = df.dropna()\n count = df.shape[0] - dropped.shape[0]\n if count > 0:\n warn(\"Missing Data: {} rows removed from {}.\".format(count,\n type(self).__name__))\n df = dropped\n\n # 5. Create a one-hot encoding of the categorical variables.\n df = pd.get_dummies(df, columns=categorical_features, prefix_sep='=')\n\n # 6. Map protected attributes to privileged/unprivileged\n privileged_protected_attributes = []\n unprivileged_protected_attributes = []\n for attr, vals in zip(protected_attribute_names, privileged_classes):\n privileged_values = [1.]\n unprivileged_values = [0.]\n if callable(vals):\n df[attr] = df[attr].apply(vals)\n elif np.issubdtype(df[attr].dtype, np.number):\n # this attribute is numeric; no remapping needed\n privileged_values = vals\n unprivileged_values = list(set(df[attr]).difference(vals))\n else:\n # find all instances which match any of the attribute values\n priv = np.logical_or.reduce(np.equal.outer(vals, df[attr]))\n df.loc[priv, attr] = privileged_values[0]\n df.loc[~priv, attr] = unprivileged_values[0]\n\n privileged_protected_attributes.append(\n np.array(privileged_values, dtype=np.float64))\n unprivileged_protected_attributes.append(\n np.array(unprivileged_values, dtype=np.float64))\n\n # 7. Make labels binary\n favorable_label = 1.\n unfavorable_label = 0.\n if callable(favorable_classes):\n df[label_name] = df[label_name].apply(favorable_classes)\n elif np.issubdtype(df[label_name], np.number) and len(set(df[label_name])) == 2:\n # labels are already binary; don't change them\n favorable_label = favorable_classes[0]\n unfavorable_label = set(df[label_name]).difference(favorable_classes).pop()\n else:\n # find all instances which match any of the favorable classes\n pos = np.logical_or.reduce(np.equal.outer(favorable_classes,\n df[label_name]))\n df.loc[pos, label_name] = favorable_label\n df.loc[~pos, label_name] = unfavorable_label\n\n super(StandardDataset, self).__init__(df=df, label_names=[label_name],\n protected_attribute_names=protected_attribute_names,\n privileged_protected_attributes=privileged_protected_attributes,\n unprivileged_protected_attributes=unprivileged_protected_attributes,\n instance_weights_name=instance_weights_name,\n favorable_label=favorable_label,\n unfavorable_label=unfavorable_label, metadata=metadata)\n" ]
[ [ "numpy.issubdtype", "numpy.array", "numpy.equal.outer", "pandas.get_dummies" ] ]
shfshf/ner_s2s
[ "a04311310bddf396b551969fd1e63fdb3fc2ca0b" ]
[ "ner_s2s/ner_estimator/algorithms/model.py" ]
[ "from pathlib import Path\n\nimport tensorflow as tf\nimport numpy as np\nfrom ner_s2s.metrics import precision, recall, f1, correct_rate\n\n\nclass Model(object):\n @classmethod\n def default_params(cls):\n return {}\n\n @classmethod\n def get_model_name(cls):\n return cls.__name__\n\n @classmethod\n def model_fn(cls, features, labels, mode, params):\n instance = cls(features, labels, mode, params)\n return instance()\n\n def __init__(self, features, labels, mode, params):\n self.features = features\n self.labels = labels\n self.mode = mode\n self.params = params\n\n def input_layer(self):\n # data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None)\n data = self.params[\"vocab_data\"]\n mapping_strings = tf.Variable(data)\n vocab_words = tf.contrib.lookup.index_table_from_tensor(\n mapping_strings, num_oov_buckets=1\n )\n\n # Word Embeddings\n words = tf.identity(self.features[\"words\"], name=\"input_words\")\n word_ids = vocab_words.lookup(words)\n\n #\n # raw_nwords = tf.identity(features['words_len'], name='input_words_len')\n # nwords = tf.feature_column.input_layer({'words_len': raw_nwords}, params['words_len_feature_columns'])\n # nwords = tf.reshape(nwords, [-1])\n # nwords = tf.to_int32(nwords)\n\n # words = features['words']\n # words = tf.convert_to_tensor(words)\n #\n # nwords = features['words_len']\n # nwords = tf.convert_to_tensor(nwords)\n\n nwords = tf.identity(self.features[\"words_len\"], name=\"input_words_len\")\n\n # get tag info\n # with Path(self.params['tags']).open() as f:\n indices = [\n idx\n for idx, tag in enumerate(self.params[\"tags_data\"])\n if tag.strip() != \"O\"\n ]\n num_tags = len(indices) + 1\n\n # # true tags to ids\n # if self.mode == tf.estimator.ModeKeys.PREDICT:\n # true_tag_ids = 0\n # else:\n # true_tag_ids = self.tag2id(self.labels)\n\n return indices, num_tags, word_ids, nwords\n\n def embedding_layer(self, word_ids):\n # load pre-trained data from file\n # glove = np.load(params['glove'])['embeddings'] # np.array\n\n # training the embedding during training\n glove = np.zeros(\n (self.params[\"embedding_vocabulary_size\"], self.params[\"embedding_dim\"]),\n dtype=np.float32,\n )\n\n # Add OOV word embedding\n embedding_array = np.vstack([glove, [[0.0] * self.params[\"embedding_dim\"]]])\n\n embedding_variable = tf.Variable(\n embedding_array, dtype=tf.float32, trainable=True\n )\n\n # embedding_variable = tf.get_variable(\n # 'embedding_variable',\n # shape=(self.params[\"embedding_vocabulary_size\"] + 1, self.params[\"embedding_dim\"]),\n # dtype=tf.float32,\n # initializer=tf.contrib.layers.xavier_initializer(),\n # regularizer=tf.contrib.layers.l2_regularizer(self.params[\"regularizer_rate\"]),\n # trainable=True\n # )\n\n embeddings = tf.nn.embedding_lookup(embedding_variable, word_ids)\n\n return embeddings\n\n def dropout_layer(self, data):\n training = self.mode == tf.estimator.ModeKeys.TRAIN\n output = tf.layers.dropout(data, rate=self.params[\"dropout\"], training=training)\n\n return output\n\n def layer_normalization_layer(self, data):\n output = tf.contrib.layers.layer_norm(data)\n\n return output\n\n def dense_layer(self, data, num_tags):\n logits = tf.layers.dense(data, num_tags)\n\n return logits\n\n def load_tag_data(self):\n # data = np.loadtxt(self.params['tags'], dtype=np.unicode, encoding=None)\n data = self.params[\"tags_data\"]\n mapping_strings = tf.Variable(data)\n\n return mapping_strings\n\n def load_word_data(self):\n data = np.loadtxt(self.params[\"words\"], dtype=np.unicode, encoding=None)\n 
mapping_strings = tf.Variable(data.reshape((-1,)))\n\n return mapping_strings\n\n def tag2id(self, labels, name=None):\n mapping_strings = self.load_tag_data()\n vocab_tags = tf.contrib.lookup.index_table_from_tensor(\n mapping_strings, name=name\n )\n\n tags = vocab_tags.lookup(labels)\n\n return tags\n\n def id2tag(self, pred_ids, name=None):\n mapping_strings = self.load_tag_data()\n reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(\n mapping_strings, name=name\n )\n\n pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))\n\n return pred_strings\n\n def id2word(self, word_ids, name=None):\n mapping_strings = self.load_word_data()\n reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_tensor(\n mapping_strings, name=name\n )\n\n word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids))\n\n return word_strings\n\n def loss_layer(self, preds, ground_true, nwords, crf_params):\n with tf.name_scope(\"CRF_log_likelihood\"):\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(\n preds, ground_true, nwords, crf_params\n )\n\n loss = tf.reduce_mean(-log_likelihood)\n\n # regularizer = tf.contrib.layers.l2_regularizer(0.001)\n # reg = regularizer(embedding_variable)\n # loss += reg\n\n return loss\n\n def crf_decode_layer(self, logits, crf_params, nwords):\n with tf.name_scope(\"CRF_decode\"):\n pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)\n\n return pred_ids\n\n def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords):\n weights = tf.sequence_mask(nwords)\n\n # metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)\n # metrics_correct_rate = correct_rate(tags, pred_ids, weights)\n\n metrics = {\n \"acc\": tf.metrics.accuracy(tags, pred_ids, weights),\n \"precision\": precision(tags, pred_ids, num_tags, indices, weights),\n \"recall\": recall(tags, pred_ids, num_tags, indices, weights),\n \"f1\": f1(tags, pred_ids, num_tags, indices, weights),\n \"correct_rate\": correct_rate(tags, pred_ids, weights),\n # 'golden': (golden, tf.zeros([], tf.int32)),\n # 'predict': (predict, tf.zeros([], tf.int32))\n }\n\n for metric_name, op in metrics.items():\n tf.summary.scalar(metric_name, op[1])\n\n return metrics\n\n def call(self, embeddings, nwords):\n raise NotImplementedError\n\n def __call__(self):\n with tf.variable_scope(\"task_independent\"):\n indices, num_tags, word_ids, nwords = self.input_layer()\n\n embeddings = self.embedding_layer(word_ids)\n\n data = self.call(embeddings, nwords)\n\n data = self.dropout_layer(data)\n data = self.layer_normalization_layer(data)\n\n with tf.variable_scope(\"task_dependent\"):\n logits = self.dense_layer(data, num_tags)\n\n crf_params = tf.get_variable(\"crf\", [num_tags, num_tags], dtype=tf.float32)\n\n pred_ids = self.crf_decode_layer(logits, crf_params, nwords)\n\n pred_strings = self.id2tag(pred_ids, name=\"predict\")\n\n # word_strings = self.id2word(word_ids, name='word_strings')\n\n # print(word_strings)\n\n if self.mode == tf.estimator.ModeKeys.PREDICT:\n\n predictions = {\"pred_ids\": pred_ids, \"tags\": pred_strings}\n return tf.estimator.EstimatorSpec(self.mode, predictions=predictions)\n else:\n # true_tag_ids = self.labels\n true_tag_ids = self.tag2id(self.labels, \"labels\")\n\n # print(pred_strings)\n # print(self.labels)\n\n loss = self.loss_layer(logits, true_tag_ids, nwords, crf_params)\n\n metrics = self.compute_metrics(\n true_tag_ids, pred_ids, num_tags, indices, nwords\n )\n\n if self.mode == tf.estimator.ModeKeys.EVAL:\n\n return 
tf.estimator.EstimatorSpec(\n self.mode, loss=loss, eval_metric_ops=metrics\n )\n\n elif self.mode == tf.estimator.ModeKeys.TRAIN:\n\n optimizer_params = self.params.get(\"optimizer_params\", {})\n global_step = tf.train.get_or_create_global_step()\n\n # apply learning rate decay if it's setup already.\n lr_decay_params = optimizer_params.pop(\"learning_rate_exp_decay\", {})\n\n # learning_rate = tf.train.exponential_decay(\n # self.params[\"learning_rate\"],\n # global_step,\n # decay_steps=self.params[\"lr_decay_steps\"],\n # decay_rate=self.params[\"lr_decay_rate\"],\n # staircase=True\n # )\n if lr_decay_params:\n learning_rate = tf.train.exponential_decay(\n lr_decay_params[\"learning_rate\"],\n global_step,\n decay_steps=lr_decay_params[\"lr_decay_steps\"],\n decay_rate=lr_decay_params[\"lr_decay_rate\"],\n staircase=lr_decay_params.get(\"staircase\", True),\n )\n optimizer_params[\"learning_rate\"] = learning_rate\n\n var_list = None\n if self.params[\"warm_start_dir\"]:\n output_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"task_dependent\")\n output_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"task_independent/Variable_1\")\n var_list = [output_vars1, output_vars2]\n\n train_op = tf.train.AdamOptimizer(\n # learning_rate=self.params[\"learning_rate\"]\n # **self.params.get(\"optimizer_params\", {})\n # learning_rate=learning_rate\n **optimizer_params\n ).minimize(loss, global_step=global_step, var_list=var_list)\n\n return tf.estimator.EstimatorSpec(\n self.mode, loss=loss, train_op=train_op\n )\n" ]
[ [ "tensorflow.get_variable", "tensorflow.contrib.lookup.index_table_from_tensor", "tensorflow.metrics.accuracy", "tensorflow.layers.dropout", "numpy.vstack", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.crf.crf_decode", "tensorflow.contrib.crf.crf_log_likelihood", "tensorflow.summary.scalar", "tensorflow.to_int64", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.layers.dense", "tensorflow.train.get_or_create_global_step", "tensorflow.name_scope", "numpy.zeros", "tensorflow.identity", "tensorflow.sequence_mask", "tensorflow.nn.embedding_lookup", "tensorflow.contrib.lookup.index_to_string_table_from_tensor", "tensorflow.reduce_mean", "tensorflow.contrib.layers.layer_norm", "tensorflow.estimator.EstimatorSpec", "tensorflow.variable_scope", "numpy.loadtxt" ] ]
pfriesch/neural-pipeline
[ "2df4f7467a721b1fbd93f4439086c6dcee5dac2c", "2df4f7467a721b1fbd93f4439086c6dcee5dac2c" ]
[ "examples/files/img_classification.py", "neural_pipeline/builtin/monitors/mpl.py" ]
[ "from neural_pipeline.builtin.monitors.tensorboard import TensorboardMonitor\nfrom neural_pipeline import DataProducer, AbstractDataset, TrainConfig, TrainStage,\\\n ValidationStage, Trainer, FileStructManager\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nclass MNISTDataset(AbstractDataset):\n transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n\n def __init__(self, data_dir: str, is_train: bool):\n self.dataset = datasets.MNIST(data_dir, train=is_train, download=True)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n data, target = self.dataset[item]\n return {'data': self.transforms(data), 'target': target}\n\n\nif __name__ == '__main__':\n fsm = FileStructManager(base_dir='data', is_continue=False)\n model = Net()\n\n train_dataset = DataProducer([MNISTDataset('data/dataset', True)], batch_size=4, num_workers=2)\n validation_dataset = DataProducer([MNISTDataset('data/dataset', False)], batch_size=4, num_workers=2)\n\n train_config = TrainConfig([TrainStage(train_dataset), ValidationStage(validation_dataset)], torch.nn.NLLLoss(),\n torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.5))\n\n trainer = Trainer(model, train_config, fsm, torch.device('cuda:0')).set_epoch_num(5)\n trainer.monitor_hub.add_monitor(TensorboardMonitor(fsm, is_continue=False))\n trainer.train()\n", "\"\"\"\nThis module contains Matplotlib monitor interface\n\"\"\"\n\nfrom random import shuffle\n\ntry:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\nexcept ImportError:\n import sys\n print(\"Can't import Matplotlib in module neural-pipeline.builtin.mpl. 
Try perform 'pip install matplotlib'\", file=sys.stderr)\n sys.exit(1)\n\nimport numpy as np\n\nfrom neural_pipeline import AbstractMonitor\nfrom neural_pipeline.train_config import MetricsGroup\n\n\nclass MPLMonitor(AbstractMonitor):\n \"\"\"\n This monitor show all data in Matplotlib plots\n \"\"\"\n class _Plot:\n __cmap = plt.cm.get_cmap('hsv', 10)\n __cmap_indices = [i for i in range(10)]\n shuffle(__cmap_indices)\n\n def __init__(self, names: [str]):\n self._handle = names[0]\n\n self._prev_values = {}\n self._colors = {}\n self._axis = None\n\n def add_values(self, values: {}, epoch_idx: int) -> None:\n for n, v in values.items():\n self.add_value(n, v, epoch_idx)\n\n def add_value(self, name: str, val: float, epoch_idx: int) -> None:\n if name not in self._prev_values:\n self._prev_values[name] = None\n self._colors[name] = self.__cmap(self.__cmap_indices[len(self._colors)])\n prev_value = self._prev_values[name]\n if prev_value is not None and self._axis is not None:\n self._axis.plot([prev_value[1], epoch_idx], [prev_value[0], val], label=name, c=self._colors[name])\n self._prev_values[name] = [val, epoch_idx]\n\n def place_plot(self, axis) -> None:\n self._axis = axis\n\n for n, v in self._prev_values.items():\n self._axis.scatter(v[1], v[0], label=n, c=self._colors[n])\n\n self._axis.set_ylabel(self._handle)\n self._axis.set_xlabel('epoch')\n self._axis.xaxis.set_major_locator(MaxNLocator(integer=True))\n self._axis.legend()\n plt.grid()\n\n def __init__(self):\n super().__init__()\n\n self._realtime = True\n self._plots = {}\n self._plots_placed = False\n\n def update_losses(self, losses: {}):\n def on_loss(name: str, values: np.ndarray):\n plot = self._cur_plot(['loss', name])\n plot.add_value(name, np.mean(values), self.epoch_num)\n\n self._iterate_by_losses(losses, on_loss)\n\n if not self._plots_placed:\n self._place_plots()\n self._plots_placed = True\n\n if self._realtime:\n plt.pause(0.01)\n\n def update_metrics(self, metrics: {}) -> None:\n for metric in metrics['metrics']:\n self._process_metric(metric)\n\n for metrics_group in metrics['groups']:\n for metric in metrics_group.metrics():\n self._process_metric(metric, metrics_group.name())\n for group in metrics_group.groups():\n self._process_metric(group)\n\n def realtime(self, is_realtime: bool) -> 'MPLMonitor':\n \"\"\"\n Is need to show data updates in realtime\n\n :param is_realtime: is need realtime\n :return: self object\n \"\"\"\n self._realtime = is_realtime\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n plt.show()\n\n def _process_metric(self, cur_metric, parent_tag: str = None):\n if isinstance(cur_metric, MetricsGroup):\n for m in cur_metric.metrics():\n names = self._compile_names(parent_tag, [cur_metric.name(), m.name()])\n plot = self._cur_plot(names)\n if m.get_values().size > 0:\n plot.add_value(m.name(), np.mean(m.get_values), self.epoch_num)\n else:\n values = cur_metric.get_values().astype(np.float32)\n names = self._compile_names(parent_tag, [cur_metric.name()])\n plot = self._cur_plot(names)\n if values.size > 0:\n plot.add_value(cur_metric.name(), np.mean(values), self.epoch_num)\n\n @staticmethod\n def _compile_names(parent_tag: str, names: [str]):\n if parent_tag is not None:\n return [parent_tag] + names\n else:\n return names\n\n def _cur_plot(self, names: [str]) -> '_Plot':\n if names[0] not in self._plots:\n self._plots[names[0]] = self._Plot(names)\n return self._plots[names[0]]\n\n def _place_plots(self):\n number_of_subplots = len(self._plots)\n idx = 1\n for n, v in 
self._plots.items():\n v.place_plot(plt.subplot(number_of_subplots, 1, idx))\n idx += 1\n" ]
[ [ "torch.nn.NLLLoss", "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.device", "torch.nn.functional.max_pool2d" ], [ "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.subplot", "numpy.mean", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.pause" ] ]
jyf588/SimGAN
[ "23283d7b5629f1653567b2437bb28aac1cc17169", "23283d7b5629f1653567b2437bb28aac1cc17169" ]
[ "third_party/a2c_ppo_acktr/model_split.py", "third_party/a2c_ppo_acktr/algo/ppo.py" ]
[ "# MIT License\n#\n# Copyright (c) 2017 Ilya Kostrikov and (c) 2020 Google LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# SOFTWARE.\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom third_party.a2c_ppo_acktr.distributions import PlainDiagGaussian, FixedNormal\nfrom third_party.a2c_ppo_acktr.utils import init\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass SplitPolicy(nn.Module):\n def __init__(self, obs_shape, action_space, base_kwargs=None):\n super(SplitPolicy, self).__init__()\n if base_kwargs is None:\n base_kwargs = {}\n\n num_outputs = action_space.shape[0]\n\n # self.base = SplitPolicyBase(obs_shape[0], num_outputs, **base_kwargs)\n # self.dist = PlainDiagGaussian(num_outputs)\n\n self.base = SplitPolicyBaseNew(obs_shape[0], **base_kwargs) # num_feet not used\n self.dist = StateDiagGaussianNew(num_outputs, **base_kwargs) # hid size, num feet\n\n @property\n def is_recurrent(self):\n return False\n\n @property\n def recurrent_hidden_state_size(self):\n \"\"\"Size of rnn_hx.\"\"\"\n return 1\n\n def forward(self, inputs, rnn_hxs, masks):\n # not used\n raise NotImplementedError\n\n # def reset_variance(self, action_space, log_std):\n # num_outputs = action_space.shape[0]\n # self.dist.reset_variance(num_outputs, log_std)\n\n def act(self, inputs, rnn_hxs, masks, deterministic=False):\n value, actor_mean, rnn_hxs = self.base(inputs, rnn_hxs, masks)\n dist = self.dist(actor_mean)\n\n if deterministic:\n action = dist.mode()\n else:\n action = dist.sample()\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action, action_log_probs, rnn_hxs\n\n def get_value(self, inputs, rnn_hxs, masks):\n value, _, _ = self.base(inputs, rnn_hxs, masks)\n return value\n\n def evaluate_actions(self, inputs, rnn_hxs, masks, action):\n value, actor_mean, rnn_hxs = self.base(inputs, rnn_hxs, masks)\n dist = self.dist(actor_mean)\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action_log_probs, dist_entropy, rnn_hxs\n\n\nclass SplitPolicyBase(nn.Module):\n def __init__(self, num_inputs, num_outputs, hidden_size=64, num_feet=1):\n\n # split into two nets here, first one contact, second one actuator\n # share the same q,dq,a obs\n\n assert num_outputs == (4+3)*num_feet # contact 4, act 3\n\n # last layer is linear and gain is 1.0 rather than 1.414\n # probably need to move last layer 
from distribution to here\n\n super(SplitPolicyBase, self).__init__()\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n init_final_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0))\n init_final_act_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), gain=0.02)\n\n self.actor_contact = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),\n init_final_act_(nn.Linear(hidden_size, 4*num_feet)))\n\n self.actor_actuator = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),\n init_final_act_(nn.Linear(hidden_size, 3*num_feet)))\n\n # keep value function unsplit, since 2 actors share one reward from D\n self.critic_full = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),\n init_final_(nn.Linear(hidden_size, 1)))\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n x = inputs\n value = self.critic_full(x)\n\n # for _ in self.critic_full.parameters():\n # print(_)\n # break\n # print(self.actors[0].parameters()[0])\n\n # print(self.each_num_input)\n # print(x.size())\n # print(x[:self.each_num_input].size())\n\n y1 = self.actor_contact(x)\n y2 = self.actor_actuator(x)\n\n action_mean = torch.cat((y1, y2), 1)\n\n return value, action_mean, rnn_hxs\n\n\nclass SplitPolicyBaseNew(nn.Module):\n def __init__(self, num_inputs, hidden_size=64, num_feet=1):\n\n # split into two nets here, first one contact, second one actuator\n # share the same q,dq,a obs\n\n # last layer is linear and gain is 1.0 rather than 1.414\n # probably need to move last layer from distribution to here\n\n super(SplitPolicyBaseNew, self).__init__()\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n init_final_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0))\n\n self.actor_contact = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.actor_actuator = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n # keep value function unsplit, since 2 actors share one reward from D\n self.critic_full = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),\n init_final_(nn.Linear(hidden_size, 1)))\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n x = inputs\n value = self.critic_full(x)\n\n y1 = self.actor_contact(x)\n y2 = self.actor_actuator(x)\n\n action_feat = torch.cat((y1, y2), 1)\n\n return value, action_feat, rnn_hxs\n\n\nclass StateDiagGaussianNew(nn.Module):\n def __init__(self, num_outputs, hidden_size=64, num_feet=1):\n super(StateDiagGaussianNew, self).__init__()\n\n assert num_outputs == (4 + 3) * num_feet # contact 4, act 3\n self.hidden_size = hidden_size\n\n # weight, bias, weight_gain\n init_mean_ = lambda m: init(\n m, nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n gain=0.02)\n\n init_logstd_ = lambda m: init(\n m, nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, -0.5),\n gain=1.0)\n\n self.contact_mean = init_mean_(nn.Linear(hidden_size, 4 * num_feet))\n self.actuator_mean = init_mean_(nn.Linear(hidden_size, 3 * num_feet))\n\n 
self.contact_logstd = init_logstd_(nn.Linear(hidden_size, 4 * num_feet))\n self.actuator_logstd = init_logstd_(nn.Linear(hidden_size, 3 * num_feet))\n\n def forward(self, x):\n contact_feat = x[:, :self.hidden_size]\n actuator_feat = x[:, self.hidden_size:]\n\n contact_mean = self.contact_mean(contact_feat)\n actuator_mean = self.actuator_mean(actuator_feat)\n\n contact_logstd = self.contact_logstd(contact_feat)\n actuator_logstd = self.actuator_logstd(actuator_feat)\n\n action_mean = torch.cat((contact_mean, actuator_mean), 1)\n action_logstd = torch.cat((contact_logstd, actuator_logstd), 1)\n\n return FixedNormal(action_mean, action_logstd.exp())\n # return MultiNormalWrapper(action_mean, scale_tril=torch.diag(action_logstd.exp()))\n", "# MIT License\n#\n# Copyright (c) 2017 Ilya Kostrikov\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom my_pybullet_envs.utils import mirror_obsact_batch\n\n\nclass PPO():\n def __init__(self,\n actor_critic,\n clip_param,\n ppo_epoch,\n num_mini_batch,\n value_loss_coef,\n entropy_coef,\n symmetry_coef=0,\n lr=None,\n eps=None,\n max_grad_norm=None,\n use_clipped_value_loss=True,\n mirror_obs=None,\n mirror_act=None):\n\n self.actor_critic = actor_critic\n\n self.clip_param = clip_param\n self.ppo_epoch = ppo_epoch\n self.num_mini_batch = num_mini_batch\n\n self.value_loss_coef = value_loss_coef\n self.entropy_coef = entropy_coef\n\n self.max_grad_norm = max_grad_norm\n self.use_clipped_value_loss = use_clipped_value_loss\n\n self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)\n\n self.symmetry_coef = symmetry_coef\n self.mirror_obs = mirror_obs\n self.mirror_act = mirror_act\n\n self.is_cuda = next(actor_critic.parameters()).is_cuda\n\n def update(self, rollouts):\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n advantages = (advantages - advantages.mean()) / (\n advantages.std() + 1e-5)\n\n value_loss_epoch = 0\n action_loss_epoch = 0\n dist_entropy_epoch = 0\n\n for e in range(self.ppo_epoch):\n if self.actor_critic.is_recurrent:\n data_generator = rollouts.recurrent_generator(\n advantages, self.num_mini_batch)\n else:\n data_generator = rollouts.feed_forward_generator(\n advantages, self.num_mini_batch)\n\n for sample in data_generator:\n obs_batch, recurrent_hidden_states_batch, actions_batch, \\\n value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \\\n adv_targ, *_ = sample\n\n # Reshape to do in a single 
forward pass for all steps\n values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(\n obs_batch, recurrent_hidden_states_batch, masks_batch,\n actions_batch)\n\n ratio = torch.exp(action_log_probs -\n old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param,\n 1.0 + self.clip_param) * adv_targ\n action_loss = -torch.min(surr1, surr2).mean()\n\n if self.use_clipped_value_loss:\n value_pred_clipped = value_preds_batch + \\\n (values - value_preds_batch).clamp(-self.clip_param, self.clip_param)\n value_losses = (values - return_batch).pow(2)\n value_losses_clipped = (\n value_pred_clipped - return_batch).pow(2)\n value_loss = 0.5 * torch.max(value_losses,\n value_losses_clipped).mean()\n else:\n value_loss = 0.5 * (return_batch - values).pow(2).mean()\n\n # https://github.com/UBCMOCCA/SymmetricRL/blob/master/algorithms/ppo.py#L86\n if self.mirror_obs and self.symmetry_coef > 0:\n act_mean = self.actor_critic.act(\n obs_batch,\n recurrent_hidden_states_batch,\n masks_batch,\n deterministic=True,\n )[1]\n\n # pi - Ma(pi(Ms))\n # <=> Ma(pi) - pi(Ms)\n mirror_act_mean = mirror_obsact_batch(act_mean, self.is_cuda,\n self.mirror_act, augment=False)\n\n mirror_obs_batch = mirror_obsact_batch(obs_batch, self.is_cuda,\n self.mirror_obs, augment=False)\n\n act_mirror_mean = self.actor_critic.act(\n mirror_obs_batch,\n recurrent_hidden_states_batch,\n masks_batch,\n deterministic=True\n )[1]\n\n symmetry_loss = (mirror_act_mean - act_mirror_mean).pow(2).mean()\n else:\n symmetry_loss = 0\n\n self.optimizer.zero_grad()\n (value_loss * self.value_loss_coef\n + action_loss\n - dist_entropy * self.entropy_coef\n + symmetry_loss * self.symmetry_coef).backward()\n nn.utils.clip_grad_norm_(self.actor_critic.parameters(),\n self.max_grad_norm)\n self.optimizer.step()\n\n value_loss_epoch += value_loss.item()\n action_loss_epoch += action_loss.item()\n dist_entropy_epoch += dist_entropy.item()\n\n num_updates = self.ppo_epoch * self.num_mini_batch\n\n value_loss_epoch /= num_updates\n action_loss_epoch /= num_updates\n dist_entropy_epoch /= num_updates\n\n return value_loss_epoch, action_loss_epoch, dist_entropy_epoch\n" ]
[ [ "numpy.sqrt", "torch.cat", "torch.nn.init.constant_", "torch.nn.Tanh", "torch.nn.Linear" ], [ "torch.exp", "torch.clamp", "torch.max", "torch.min" ] ]
PilmautBotics/Tracking_SSD_ReID
[ "5fa2f83ac48c64dc1be24cc1a156d19ba5908dff" ]
[ "data_management/VOC2012ManagerObjDetection.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPascal VOC2012 dataset manager\n\"\"\"\n\nimport os\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass VOC2012ManagerObjDetection:\n def __init__(self, path=\"\", trainRatio=0.7, batch_size=32, floatType=32):\n super(VOC2012ManagerObjDetection, self).__init__()\n if floatType == 32:\n self.floatType = tf.float32\n elif floatType == 16:\n self.floatType = tf.float16\n else:\n raise Exception('floatType should be either 32 or 16')\n\n self.path = path\n self.img_resolution = (300, 300)\n self.classes = {\n 'undefined': 0,\n 'aeroplane': 1,\n 'bicycle': 2,\n 'bird': 3,\n 'boat': 4,\n 'bottle': 5,\n 'bus': 6,\n 'car': 7,\n 'cat': 8,\n 'chair': 9,\n 'cow': 10,\n 'diningtable': 11,\n 'dog': 12,\n 'horse': 13,\n 'motorbike': 14,\n 'person': 15,\n 'pottedplant': 16,\n 'sheep': 17,\n 'sofa': 18,\n 'train': 19,\n 'tvmonitor': 20,\n }\n self.images_path = path + \"/JPEGImages/\"\n self.annotations_path = path + \"/Annotations/\"\n self.images_name = []\n if path != \"\":\n self.images_name = [\n im.replace(\".jpg\", \"\")\n for im in os.listdir(self.images_path)\n if os.path.isfile(os.path.join(self.images_path, im))\n ]\n self.number_samples = len(self.images_name)\n self.train_samples = int(self.number_samples * trainRatio)\n self.train_set = self.images_name[: self.train_samples - self.train_samples % batch_size]\n self.val_set = self.images_name[self.train_samples :]\n self.batches = [\n self.train_set[i : i + batch_size] for i in range(0, len(self.train_set), batch_size)\n ]\n\n def getRawData(self, images_name: list):\n \"\"\"\n Method to get images and annotations from a list of images name\n\n Args:\n - (list) images name without extension\n\n Return:\n - (tf.Tensor) Images of shape:\n [number of images, self.img_resolution]\n - (list of tf.Tensor) Boxes of shape:\n [number of images, number of objects, 4]\n - (list tf.Tensor) Classes of shape:\n [number of images, number of objects]\n \"\"\"\n images = []\n boxes = []\n classes = []\n for img in images_name:\n image = tf.keras.preprocessing.image.load_img(self.images_path + img + \".jpg\")\n w, h = image.size[0], image.size[1]\n image = tf.image.resize(np.array(image), self.img_resolution)\n images_array = tf.keras.preprocessing.image.img_to_array(image) / 255.0\n images.append(images_array)\n\n # annotation\n boxes_img_i, classes_img_i = self.getAnnotations(img, (w, h))\n boxes.append(boxes_img_i)\n classes.append(classes_img_i)\n return tf.convert_to_tensor(images, dtype=self.floatType), boxes, classes\n\n def getAnnotations(self, image_name: str, resolution: tuple):\n \"\"\"\n Method to get annotation: boxes and classes\n\n Args:\n - (str) image name without extension\n - (tuple) image resolution (W, H, C) or (W, H)\n\n Return:\n - (tf.Tensor) Boxes of shape: [number of objects, 4]\n - (tf.Tensor) Classes of shape: [number of objects]\n \"\"\"\n boxes = []\n classes = []\n objects = ET.parse(self.annotations_path + image_name + \".xml\").findall('object')\n for obj in objects:\n bndbox = obj.find('bndbox')\n xmin = float(bndbox.find('xmin').text) / resolution[0]\n ymin = float(bndbox.find('ymin').text) / resolution[1]\n xmax = float(bndbox.find('xmax').text) / resolution[0]\n ymax = float(bndbox.find('ymax').text) / resolution[1]\n\n # calculate cx, cy, width, height\n width = xmax - xmin\n height = ymax - ymin\n if xmin + width > 1.0 or ymin + height > 1.0 or xmin < 0.0 or ymin < 0.0:\n print(\n \"Boxe outside picture: (xmin, ymin, 
xmax, ymax):\\\n ({} {}, {}, {})\".format(\n xmin, ymin, xmax, ymax\n )\n )\n\n boxes.append([xmin + width / 2.0, ymin + height / 2.0, width, height])\n\n # get class\n name = obj.find('name').text.lower().strip()\n classes.append(self.classes[name])\n\n return tf.convert_to_tensor(boxes, dtype=self.floatType), tf.convert_to_tensor(\n classes, dtype=tf.int16\n )\n\n def getImagesAndGt(self, images_name: list, default_boxes: list):\n \"\"\"\n Method to get the groud truth for confidence and localization\n S: number of stage\n D: number of default boxes\n B: batch size (number of images)\n\n Args:\n - (list) images name without extension\n - (tf.Tensor) default boxes per stage: [D, 4]\n 4 parameters: cx, cy, w, h\n\n Return:\n - (tf.Tensor) Images of shape:\n [number of images, self.img_resolution]\n - (tf.Tensor) confs ground truth: [B, D]\n - (tf.Tensor) locs ground truth: [B, D, 4]\n \"\"\"\n images, boxes, classes = self.getRawData(images_name)\n gt_confs = []\n gt_locs = []\n for i, gt_boxes_img in enumerate(boxes):\n gt_confs_per_default_box = []\n gt_locs_per_default_box = []\n for d, default_box in enumerate(default_boxes):\n for g, gt_box in enumerate(gt_boxes_img):\n iou = self.computeJaccardIdx(gt_box, default_box)\n gt_conf = self.classes['undefined']\n gt_loc = tf.Variable([0.0, 0.0, 0.0, 0.0])\n if iou >= 0.5:\n gt_conf = tf.Variable(classes[i][g])\n gt_loc = self.getLocOffsets(gt_box, default_box)\n gt_confs_per_default_box.append(gt_conf)\n gt_locs_per_default_box.append(gt_loc)\n gt_confs.append(gt_confs_per_default_box)\n gt_locs.append(gt_locs_per_default_box)\n\n return (\n images,\n tf.convert_to_tensor(gt_confs, dtype=tf.int16),\n tf.convert_to_tensor(gt_locs, dtype=self.floatType),\n )\n\n def computeRectangleArea(self, xmin, ymin, xmax, ymax):\n return (xmax - xmin) * (ymax - ymin)\n\n def computeJaccardIdx(self, box_1: tf.Tensor, box_2: tf.Tensor):\n \"\"\"\n Method to get the Intersection-Over-Union between two boxes\n\n Args:\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n\n Return:\n - (float) IoU value\n \"\"\"\n xmin_box_1 = box_1[0] - box_1[2] / 2.0\n ymin_box_1 = box_1[1] - box_1[3] / 2.0\n xmax_box_1 = box_1[0] + box_1[2] / 2.0\n ymax_box_1 = box_1[1] + box_1[3] / 2.0\n\n xmin_box_2 = box_2[0] - box_2[2] / 2.0\n ymin_box_2 = box_2[1] - box_2[3] / 2.0\n xmax_box_2 = box_2[0] + box_2[2] / 2.0\n ymax_box_2 = box_2[1] + box_2[3] / 2.0\n\n xmin_intersection = max(xmin_box_1, xmin_box_2)\n ymin_intersection = max(ymin_box_1, ymin_box_2)\n xmax_intersection = min(xmax_box_1, xmax_box_2)\n ymax_intersection = min(ymax_box_1, ymax_box_2)\n\n if xmin_intersection > xmax_intersection or ymin_intersection > ymax_intersection:\n return 0.0\n intersection = self.computeRectangleArea(\n xmin_intersection, ymin_intersection, xmax_intersection, ymax_intersection\n )\n\n union = (\n self.computeRectangleArea(xmin_box_1, ymin_box_1, xmax_box_1, ymax_box_1)\n + self.computeRectangleArea(xmin_box_2, ymin_box_2, xmax_box_2, ymax_box_2)\n - intersection\n )\n\n return intersection / union\n\n def getLocOffsets(self, box_gt: tf.Tensor, box_pred: tf.Tensor):\n \"\"\"\n Method to get the offset from box_pred to box_gt on cx, cy, w, h\n\n Args:\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n\n Return:\n - (tf.Tensor) offset for the 4 parameters: cx, cy, w, h [4]\n \"\"\"\n\n return tf.Variable(\n [\n box_gt[0] - box_pred[0],\n box_gt[1] - box_pred[1],\n 
box_gt[2] - box_pred[2],\n box_gt[3] - box_pred[3],\n ]\n )\n\n def computeJaccardIdxSpeedUp(\n self, gt_box: tf.Tensor, default_boxes: tf.Tensor, iou_threshold: float\n ):\n \"\"\"\n Method to get the boolean tensor where iou is superior to\n the specified threshold between the gt box and the default one\n D: number of default boxes\n\n Args:\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [D, 4]\n - (float) iou threshold to use\n\n Return:\n - (tf.Tensor) 0 if iou > threshold, 1 otherwise [D]\n \"\"\"\n # convert to xmin, ymin, xmax, ymax\n default_boxes = tf.concat(\n [\n default_boxes[:, :2] - default_boxes[:, 2:] / 2,\n default_boxes[:, :2] + default_boxes[:, 2:] / 2,\n ],\n axis=-1,\n )\n gt_box = tf.concat([gt_box[:2] - gt_box[2:] / 2, gt_box[:2] + gt_box[2:] / 2], axis=-1)\n gt_box = tf.expand_dims(gt_box, 0)\n gt_box = tf.repeat(gt_box, repeats=[default_boxes.shape[0]], axis=0)\n\n # compute intersection\n inter_xymin = tf.math.maximum(default_boxes[:, :2], gt_box[:, :2])\n inter_xymax = tf.math.minimum(default_boxes[:, 2:], gt_box[:, 2:])\n inter_width_height = tf.clip_by_value(inter_xymax - inter_xymin, 0.0, 300.0)\n inter_area = inter_width_height[:, 0] * inter_width_height[:, 1]\n\n # compute area of the boxes\n gt_box_width_height = tf.clip_by_value(gt_box[:, 2:] - gt_box[:, :2], 0.0, 300.0)\n gt_box_width_height_area = gt_box_width_height[:, 0] * gt_box_width_height[:, 1]\n\n default_boxes_width_height = tf.clip_by_value(\n default_boxes[:, 2:] - default_boxes[:, :2], 0.0, 300.0\n )\n default_boxes_width_height_area = (\n default_boxes_width_height[:, 0] * default_boxes_width_height[:, 1]\n )\n\n # compute iou\n iou = inter_area / (gt_box_width_height_area + default_boxes_width_height_area - inter_area)\n return tf.dtypes.cast(iou >= iou_threshold, tf.int16)\n\n def getLocOffsetsSpeedUp(self, gt_box: tf.Tensor, iou_bin: tf.Tensor, default_boxes: tf.Tensor):\n \"\"\"\n Method to get the offset from default boxes to box_gt on cx, cy, w, h\n where iou_idx is 1\n D: number of default boxes\n\n Args:\n - (tf.Tensor) box with 4 parameters: cx, cy, w, h [4]\n - (tf.Tensor) 1 if iou > threshold, 0 otherwise [D]\n - (tf.Tensor) default boxes with 4 parameters: cx, cy, w, h [D, 4]\n\n Return:\n - (tf.Tensor) offsets if iou_bin == 1, otherwise 0 [D, 4]\n \"\"\"\n gt_box = tf.expand_dims(gt_box, 0)\n gt_box = tf.repeat(gt_box, repeats=[default_boxes.shape[0]], axis=0)\n offsets = gt_box - default_boxes\n\n iou_bin = tf.expand_dims(iou_bin, 1)\n iou_bin = tf.repeat(iou_bin, repeats=[4], axis=1)\n offsets = offsets * tf.dtypes.cast(iou_bin, self.floatType)\n return offsets\n\n def getImagesAndGtSpeedUp(self, images_name: list, default_boxes: list):\n \"\"\"\n Method to get the groud truth for confidence and localization\n S: number of stage\n D: number of default boxes\n B: batch size (number of images)\n\n Args:\n - (list) images name without extension\n - (tf.Tensor) default boxes per stage: [D, 4]\n 4 parameters: cx, cy, w, h\n\n Return:\n - (tf.Tensor) Images of shape:\n [number of images, self.img_resolution]\n - (tf.Tensor) confs ground truth: [B, D]\n - (tf.Tensor) locs ground truth: [B, D, 4]\n \"\"\"\n images, boxes, classes = self.getRawData(images_name)\n gt_confs = []\n gt_locs = []\n for i, gt_boxes_img in enumerate(boxes):\n gt_confs_per_image = tf.zeros([len(default_boxes)], tf.int16)\n gt_locs_per_image = tf.zeros([len(default_boxes), 4], self.floatType)\n iou_bin_masks = []\n for g, gt_box in 
enumerate(gt_boxes_img):\n iou_bin = self.computeJaccardIdxSpeedUp(gt_box, default_boxes, 0.5)\n for mask in iou_bin_masks:\n iou_bin = tf.clip_by_value(iou_bin - mask, 0, 1)\n iou_bin_masks.append(iou_bin)\n gt_confs_per_image = gt_confs_per_image + iou_bin * classes[i][g]\n gt_locs_per_image = gt_locs_per_image + self.getLocOffsetsSpeedUp(\n gt_box, iou_bin, default_boxes\n )\n gt_confs.append(gt_confs_per_image)\n gt_locs.append(gt_locs_per_image)\n\n return (\n images,\n tf.convert_to_tensor(gt_confs, dtype=tf.int16),\n tf.convert_to_tensor(gt_locs, dtype=self.floatType),\n )\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.Variable", "tensorflow.math.minimum", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.expand_dims", "tensorflow.repeat", "numpy.array", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.dtypes.cast", "tensorflow.math.maximum" ] ]
alopezgit/project-adapt
[ "e93ab350344a5504f76f4e460002e0163996f88a" ]
[ "Models/model.py" ]
[ "\"\"\"\nAuthor: Wouter Van Gansbeke\nLicensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.nn.functional as F\nimport numpy as np\nfrom .ERFNet import Net\nimport copy\nimport Utils.utils as utils\n\nclass uncertainty_net(nn.Module):\n def __init__(self, in_channels, out_channels=1):\n super(uncertainty_net, self).__init__()\n out_chan = 2\n\n self.combine = 'concat'\n self.in_channels = in_channels\n\n out_channels = 3\n\n self.depthnet = Net(in_channels=in_channels, out_channels=out_channels)\n\n local_channels_in = 2 if self.combine == 'concat' else 1\n local_channels_in = 4 if self.combine == 'concat_min' else local_channels_in\n self.convbnrelu = nn.Sequential(convbn(local_channels_in, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n self.hourglass1 = hourglass_1(32)\n self.hourglass2 = hourglass_2(32)\n self.fuse = nn.Sequential(convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, out_chan, kernel_size=3, padding=1, stride=1, bias=True))\n self.activation = nn.ReLU(inplace=True)\n self.softmax = torch.nn.Softmax(dim=1) \n\n def forward(self, input, epoch=50):\n if self.in_channels > 1:\n rgb_in = input[:, 1:, :, :]\n lidar_in = input[:, 0:1, :, :]\n input = torch.cat([input[:,:1], rgb_in],1)\n else:\n lidar_in = input\n \n # 1. GLOBAL NET\n embeddings, features = self.depthnet(input)\n embedding0, embedding1, embedding2 = embeddings\n global_features = embedding0[:, 0:1, :, :]\n precise_depth = embedding0[:, 1:2, :, :]\n conf = embedding0[:, 2:, :, :]\n input_max = F.max_pool2d(input[:, 0:1], 16)\n input_max = input[:,0:1] * (input[:,0:1] >= F.interpolate(input_max, size=input.shape[2:]) - 0.1*F.interpolate(input_max, size=input.shape[2:])).float()\n mask = input[:,0:1] == 0\n input_min = -F.max_pool2d(-input[:,0:1] -100*mask.float(), 16)\n input_min = input[:,0:1] * (input[:,0:1] <= F.interpolate(input_min, size=input.shape[2:]) + 0.1*F.interpolate(input_min, size=input.shape[2:])).float()\n\n # 2. Fuse \n if self.combine == 'concat':\n input = torch.cat((lidar_in, global_features), 1)\n elif self.combine == 'concat_min':\n input = torch.cat((lidar_in, global_features, input_min, input_max), 1)\n elif self.combine == 'add':\n input = lidar_in + global_features\n elif self.combine == 'mul':\n input = lidar_in * global_features\n elif self.combine == 'sigmoid':\n input = lidar_in * nn.Sigmoid()(global_features)\n else:\n input = lidar_in\n # 3. LOCAL NET\n out = self.convbnrelu(input)\n out1, embedding3, embedding4 = self.hourglass1(out, embedding1, embedding2)\n out1 = out1 + out\n out2 = self.hourglass2(out1, embedding3, embedding4)\n out2 = out2 + out\n out = self.fuse(out2)\n lidar_out = out\n\n # 4. 
Late Fusion\n lidar_to_depth, lidar_to_conf = torch.chunk(out, 2, dim=1)\n lidar_to_conf, conf = torch.chunk(self.softmax(torch.cat((lidar_to_conf, conf), 1)), 2, dim=1)\n out = conf * precise_depth + lidar_to_conf * lidar_to_depth\n return out, lidar_out, precise_depth, global_features\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False))\n\n\nclass hourglass_1(nn.Module):\n def __init__(self, channels_in):\n super(hourglass_1, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(channels_in, channels_in, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(channels_in, channels_in, kernel_size=3, stride=1, pad=1, dilation=1)\n\n self.conv3 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=1, pad=1, dilation=1))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose2d(channels_in*4, channels_in*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv6 = nn.Sequential(nn.ConvTranspose2d(channels_in*2, channels_in, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in))\n ### Target Batch Norms\n self.conv5_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv6_bn_t = nn.BatchNorm2d(channels_in)\n\n self.conv5_bn_s = self.conv5[1]\n self.conv6_bn_s = self.conv6[1]\n\n\n \n def forward(self, x, em1, em2):\n x = self.conv1(x)\n x = self.conv2(x)\n x = F.relu(x, inplace=True)\n x = torch.cat((x, em1), 1)\n\n x_prime = self.conv3(x)\n x_prime = self.conv4(x_prime)\n x_prime = F.relu(x_prime, inplace=True)\n x_prime = torch.cat((x_prime, em2), 1)\n\n out = self.conv5(x_prime)\n out = self.conv6(out)\n\n return out, x, x_prime\n\n\nclass hourglass_2(nn.Module):\n def __init__(self, channels_in):\n super(hourglass_2, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(channels_in, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(channels_in*2, channels_in*2, kernel_size=3, stride=1, pad=1, dilation=1)\n\n self.conv3 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(channels_in*2, channels_in*4, kernel_size=3, stride=1, pad=1, dilation=1))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose2d(channels_in*4, channels_in*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv6 = nn.Sequential(nn.ConvTranspose2d(channels_in*2, channels_in, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in))\n\n\n\n\n ### Target Batch Norms\n self.conv1_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv3_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv5_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv6_bn_t = nn.BatchNorm2d(channels_in)\n\n self.conv1_bn_s = self.conv1[1]\n self.conv3_bn_s = self.conv3[1]\n self.conv5_bn_s = self.conv5[1]\n self.conv6_bn_s = self.conv6[1]\n\n\n def forward(self, x, em1, em2):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x + 
em1\n x = F.relu(x, inplace=True)\n\n x_prime = self.conv3(x)\n x_prime = self.conv4(x_prime)\n x_prime = x_prime + em2\n x_prime = F.relu(x_prime, inplace=True)\n\n out = self.conv5(x_prime)\n out = self.conv6(out)\n\n return out\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.functional.relu", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.chunk", "torch.nn.ReLU", "torch.nn.functional.max_pool2d" ] ]
sameh999/kalam
[ "7a867c71daa4d230356d0b85fde2e36397ce608a" ]
[ "utils.py" ]
[ "import torch\r\nimport librosa \r\nfrom FastSpeech2.buckwalter import ar2bw, bw2ar\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\ndef load_file_to_data(file, srate = 16_000):\r\n batch = {} \r\n speech, sampling_rate = librosa.load(file, sr=srate)\r\n batch[\"speech\"] = speech\r\n batch[\"sampling_rate\"] = sampling_rate\r\n return batch\r\n\r\n\r\ndef predict(data, model, processor, mode = 'rec', \r\n bw = False, return_prob = False):\r\n if mode == 'rec':\r\n max_length = 128000\r\n features = processor(data[\"speech\"][:max_length],\r\n sampling_rate=data[\"sampling_rate\"],\r\n padding=True,\r\n max_length=max_length, \r\n pad_to_multiple_of=max_length,\r\n return_tensors=\"pt\")\r\n \r\n else:\r\n max_length = 320000\r\n features = processor(data[\"speech\"][:max_length], \r\n sampling_rate=data[\"sampling_rate\"],\r\n max_length=max_length,\r\n pad_to_multiple_of=max_length,\r\n padding=True, return_tensors=\"pt\")\r\n input_values = features.input_values.to(device)\r\n try:\r\n attention_mask = features.attention_mask.to(device)\r\n except:\r\n attention_mask = None \r\n with torch.no_grad():\r\n outputs = model(input_values, attention_mask = attention_mask)\r\n \r\n if mode == 'rec':\r\n if return_prob:\r\n raise('This parameter works for classification')\r\n pred_ids = torch.argmax(outputs.logits, dim=-1)\r\n text = processor.batch_decode(pred_ids)[0]\r\n\r\n if bw:\r\n text = \"\".join([bw2ar[l] if l in bw2ar else l for l in text])\r\n return text \r\n else:\r\n dialects = ['EGY','NOR','GLF','LAV','MSA']\r\n \r\n if not return_prob:\r\n pred_ids = torch.argmax(outputs['logits'], dim=-1)\r\n return dialects[pred_ids[0]]\r\n else:\r\n softmax = torch.nn.Softmax(dim = -1)\r\n probs = softmax(outputs['logits'])\r\n top_prob, top_lbls = torch.topk(probs[0], 5) \r\n return {dialects[top_lbls[lbl]]:format(float(top_prob[lbl]),'.2f') for lbl in range(5)}" ]
[ [ "torch.nn.Softmax", "torch.no_grad", "torch.cuda.is_available", "torch.topk", "torch.argmax" ] ]
va9abund/GAN-traffic-sign
[ "a9e7cc1c6839cdb0f29f68a951040fb27c64f738" ]
[ "classifier/train.py" ]
[ "# Python Standard Library\nfrom datetime import datetime\nimport json\nimport os\nimport time\n\n# Public Libraries\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Flatten, Lambda, Dropout, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.utils.vis_utils import plot_model\nfrom keras.callbacks import TensorBoard\nfrom sklearn.model_selection import train_test_split\n\n# Project\nimport config\nimport helper\n\nDATETIME_FORMAT = '%y-%m-%d_%H-%M'\n\n# Overwrite model files\noverwrite_model = True\n\n# Data set ('test', 'train', 'valid')\ndata_set = 'train'\n\n# Load images and labels\nx, y = helper.load_data(data_set)\n\n# Select subsample for faster debugging\nsample_fraction = 1\nnum_samples = x.shape[0]\nsample_size = round(sample_fraction * num_samples)\nx = x[:sample_size]\ny = y[:sample_size]\n\n# Split into training and validation\ntest_fraction = 0.20\nx_train, x_val, y_train, y_val = train_test_split(x, y, random_state=0,\n test_size=test_fraction)\n\n# Hyperparams\nshape = x.shape[1:]\nnum_classes = config.NUM_CLASSES\nlearning_rate = 0.001\nbatch_size = 16\nepochs = 15\n\n# Class number to classification columns (categorical to dummy variables)\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_val = np_utils.to_categorical(y_val, num_classes)\n\n# Model of Convolutional Neural Network\nmodel = Sequential()\nmodel.add(Lambda(lambda p: p/255.0 - 0.5, input_shape=shape))\nmodel.add(Conv2D(3, (1, 1), activation='sigmoid'))\nmodel.add(Conv2D(16, (5, 5), strides=(2, 2), activation='elu'))\nmodel.add(Conv2D(32, (3, 3), activation='elu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Conv2D(128, (3, 3), activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n# Print model summary\nmodel.summary()\n\n# Compile model\nmodel.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# Start training train\nstart_time = time.time()\n\n# Configure Tensorboard log\ntimestamp = datetime.fromtimestamp(start_time).strftime(DATETIME_FORMAT)\nlog_dir = os.path.join(config.TENSORBOARD_LOG_DIR, timestamp)\ntbCallBack = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True,\n write_images=True)\n\n# Train model\nhistory = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n verbose=2, validation_data=(x_val, y_val),\n callbacks=[tbCallBack])\n\n# Training duration\ntraining_time = time.time() - start_time\n\n# Print metrics of validation set\nprint('')\nprint('*** Training Complete ***')\nprint('Elapsed time: %.1f seconds' % training_time)\nscores = model.evaluate(x_val, y_val, verbose=0)\nnames = model.metrics_names\nprint('')\nprint('*** Metrics ***')\nfor name, score in zip(names, scores):\n print('%s: \\t%.4f' % (name, score))\n\n# Overwrite saved model\nif overwrite_model:\n model.save_weights(config.MODEL_WEIGHTS, overwrite=True)\n with open(config.MODEL_DEFINITION, 'w') as outfile:\n json.dump(model.to_json(), outfile)\n plot_model(model, config.MODEL_DIAGRAM, show_shapes=True)\n print('')\n print('*** Model Saved ***')\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
songhaoyu/RCDG
[ "962c3b5803b766bd25577460aa90c2741d500d99" ]
[ "reinforcement_train/onmt/io/TextDataset.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom collections import Counter\nfrom itertools import chain\nimport io\nimport codecs\nimport sys\n\nimport torch\nimport torchtext\n\nfrom onmt.Utils import aeq\nfrom onmt.io.DatasetBase import ONMTDatasetBase, PAD_WORD, BOS_WORD, EOS_WORD\n\nfrom torch.nn.functional import Variable\nfrom torchtext.data.field import Field\n\nclass TextDataset(ONMTDatasetBase):\n \"\"\" Dataset for data_type=='text'\n\n Build `Example` objects, `Field` objects, and filter_pred function\n from text corpus.\n\n Args:\n fields (dict): a dictionary of `torchtext.data.Field`.\n Keys are like 'src', 'tgt', 'src_map', and 'alignment'.\n src_examples_iter (dict iter): preprocessed source example\n dictionary iterator.\n tgt_examples_iter (dict iter): preprocessed target example\n dictionary iterator.\n num_src_feats (int): number of source side features.\n num_tgt_feats (int): number of target side features.\n src_seq_length (int): maximum source sequence length.\n tgt_seq_length (int): maximum target sequence length.\n dynamic_dict (bool): create dynamic dictionaries?\n use_filter_pred (bool): use a custom filter predicate to filter\n out examples?\n \"\"\"\n def __init__(self, fields, src_examples_iter, tgt_examples_iter, per_examples_iter, nli_examples_iter,\n num_src_feats=0, num_tgt_feats=0,\n src_seq_length=0, tgt_seq_length=0, per_seq_length=0, nli_seq_length=0,\n dynamic_dict=True, use_filter_pred=True):\n self.data_type = 'text'\n\n # self.src_vocabs: mutated in dynamic_dict, used in\n # collapse_copy_scores and in Translator.py\n self.src_vocabs = []\n\n self.n_src_feats = num_src_feats\n self.n_tgt_feats = num_tgt_feats\n\n # Each element of an example is a dictionary whose keys represents\n # at minimum the src tokens and their indices and potentially also\n # the src and tgt features and alignment information.\n if tgt_examples_iter is not None:\n examples_iter = (self._join_dicts(src, tgt, per, nli) for src, tgt, per, nli in\n zip(src_examples_iter, tgt_examples_iter, per_examples_iter, nli_examples_iter))\n else:\n examples_iter = src_examples_iter\n\n if dynamic_dict:\n examples_iter = self._dynamic_dict(examples_iter)\n\n # Peek at the first to see which fields are used.\n ex, examples_iter = self._peek(examples_iter)\n keys = ex.keys()\n\n out_fields = [(k, fields[k]) if k in fields else (k, None)\n for k in keys]\n example_values = ([ex[k] for k in keys] for ex in examples_iter)\n out_examples = (self._construct_example_fromlist(\n ex_values, out_fields)\n for ex_values in example_values)\n # If out_examples is a generator, we need to save the filter_pred\n # function in serialization too, which would cause a problem when\n # `torch.save()`. Thus we materialize it as a list.\n out_examples = list(out_examples)\n\n def filter_pred(example):\n return 0 < len(example.src) <= src_seq_length \\\n and 0 < len(example.tgt) <= tgt_seq_length \\\n and 0 < len(example.per) <= per_seq_length \\\n and 0 < len(example.nli) <= nli_seq_length\n\n filter_pred = filter_pred if use_filter_pred else lambda x: True\n\n super(TextDataset, self).__init__(\n out_examples, out_fields, filter_pred\n )\n\n def sort_key(self, ex):\n \"\"\" Sort using length of source sentences. 
\"\"\"\n return len(ex.src)\n\n @staticmethod\n def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):\n \"\"\"\n Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.\n \"\"\"\n offset = len(tgt_vocab)\n for b in range(batch.batch_size):\n index = batch.indices.data[b]\n src_vocab = src_vocabs[index]\n for i in range(1, len(src_vocab)):\n sw = src_vocab.itos[i]\n ti = tgt_vocab.stoi[sw]\n if ti != 0:\n scores[:, b, ti] += scores[:, b, offset + i]\n scores[:, b, offset + i].fill_(1e-20)\n return scores\n\n @staticmethod\n def make_text_examples_nfeats_tpl(path, truncate, side):\n \"\"\"\n Args:\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Returns:\n (example_dict iterator, num_feats) tuple.\n \"\"\"\n assert side in ['src', 'tgt', 'per', 'nli']\n\n if path is None:\n return (None, 0)\n\n # All examples have same number of features, so we peek first one\n # to get the num_feats.\n examples_nfeats_iter = \\\n TextDataset.read_text_file(path, truncate, side)\n\n first_ex = next(examples_nfeats_iter)\n num_feats = first_ex[1]\n\n # Chain back the first element - we only want to peek it.\n examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)\n examples_iter = (ex for ex, nfeats in examples_nfeats_iter)\n\n return (examples_iter, num_feats)\n\n @staticmethod\n def read_text_file(path, truncate, side):\n \"\"\"\n Args:\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Yields:\n (word, features, nfeat) triples for each line.\n \"\"\"\n with codecs.open(path, \"r\", \"utf-8\") as corpus_file:\n for i, line in enumerate(corpus_file):\n line = line.strip().split()\n if truncate:\n line = line[:truncate]\n\n words, feats, n_feats = \\\n TextDataset.extract_text_features(line)\n\n example_dict = {side: words, \"indices\": i}\n if feats:\n prefix = side + \"_feat_\"\n example_dict.update((prefix + str(j), f)\n for j, f in enumerate(feats))\n yield example_dict, n_feats\n\n @staticmethod\n def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n fields[\"src\"] = torchtext.data.Field(\n pad_token=PAD_WORD,\n include_lengths=True)\n\n for j in range(n_src_features):\n fields[\"src_feat_\"+str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\"+str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n fields[\"per\"] = PerField(\n pad_token=PAD_WORD,\n include_lengths=True)\n\n fields[\"nli\"] = NliField(\n pad_token=PAD_WORD,\n include_lengths=True)\n\n def make_src(data, vocab, is_train):\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = 
torchtext.data.Field(\n use_vocab=False, tensor_type=torch.FloatTensor,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab, is_train):\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, tensor_type=torch.LongTensor,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, tensor_type=torch.LongTensor,\n sequential=False)\n\n return fields\n\n @staticmethod\n def get_num_features(corpus_file, side):\n \"\"\"\n Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = TextDataset.extract_text_features(f_line)\n\n return num_feats\n\n # Below are helper functions for intra-class use only.\n def _dynamic_dict(self, examples_iter):\n for example in examples_iter:\n src = example[\"src\"]\n src_vocab = torchtext.vocab.Vocab(Counter(src))\n self.src_vocabs.append(src_vocab)\n # Mapping source tokens to indices in the dynamic dict.\n src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])\n example[\"src_map\"] = src_map\n\n if \"tgt\" in example:\n tgt = example[\"tgt\"]\n mask = torch.LongTensor(\n [0] + [src_vocab.stoi[w] for w in tgt] + [0])\n example[\"alignment\"] = mask\n yield example\n\n\nclass ShardedTextCorpusIterator(object):\n \"\"\"\n This is the iterator for text corpus, used for sharding large text\n corpus into small shards, to avoid hogging memory.\n\n Inside this iterator, it automatically divides the corpus file into\n shards of size `shard_size`. 
Then, for each shard, it processes\n into (example_dict, n_features) tuples when iterates.\n \"\"\"\n def __init__(self, corpus_path, line_truncate, side, shard_size,\n assoc_iter=None):\n \"\"\"\n Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.\n \"\"\"\n try:\n # The codecs module seems to have bugs with seek()/tell(),\n # so we use io.open().\n self.corpus = io.open(corpus_path, \"r\", encoding=\"utf-8\")\n except IOError:\n sys.stderr.write(\"Failed to open corpus file: %s\" % corpus_path)\n sys.exit(1)\n\n self.line_truncate = line_truncate\n self.side = side\n self.shard_size = shard_size\n self.assoc_iter = assoc_iter\n self.last_pos = 0\n self.line_index = -1\n self.eof = False\n\n def __iter__(self):\n \"\"\"\n Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates `self.shard_size`.\n \"\"\"\n if self.assoc_iter is not None:\n # We have associate iterator, just yields tuples\n # util we run parallel with it.\n while self.line_index < self.assoc_iter.line_index:\n line = self.corpus.readline()\n if line == '':\n raise AssertionError(\n \"Two corpuses must have same number of lines!\")\n\n self.line_index += 1\n yield self._example_dict_iter(line)\n\n if self.assoc_iter.eof:\n self.eof = True\n self.corpus.close()\n else:\n # Yield tuples util this shard's size reaches the threshold.\n self.corpus.seek(self.last_pos)\n while True:\n if self.shard_size != 0 and self.line_index % 64 == 0:\n # This part of check is time consuming on Py2 (but\n # it is quite fast on Py3, weird!). So we don't bother\n # to check for very line. Instead we chekc every 64\n # lines. 
Thus we are not dividing exactly per\n # `shard_size`, but it is not too much difference.\n cur_pos = self.corpus.tell()\n if cur_pos >= self.last_pos + self.shard_size:\n self.last_pos = cur_pos\n raise StopIteration\n\n line = self.corpus.readline()\n if line == '':\n self.eof = True\n self.corpus.close()\n raise StopIteration\n\n self.line_index += 1\n yield self._example_dict_iter(line)\n\n def hit_end(self):\n return self.eof\n\n @property\n def num_feats(self):\n # We peek the first line and seek back to\n # the beginning of the file.\n saved_pos = self.corpus.tell()\n\n line = self.corpus.readline().split()\n if self.line_truncate:\n line = line[:self.line_truncate]\n _, _, self.n_feats = TextDataset.extract_text_features(line)\n\n self.corpus.seek(saved_pos)\n\n return self.n_feats\n\n def _example_dict_iter(self, line):\n line = line.split()\n if self.line_truncate:\n line = line[:self.line_truncate]\n words, feats, n_feats = TextDataset.extract_text_features(line)\n example_dict = {self.side: words, \"indices\": self.line_index}\n if feats:\n # All examples must have same number of features.\n aeq(self.n_feats, n_feats)\n\n prefix = self.side + \"_feat_\"\n example_dict.update((prefix + str(j), f)\n for j, f in enumerate(feats))\n\n return example_dict\n\nclass PerField(Field):\n\n def process(self, batch, device, train):\n \"\"\" Process a list of examples to create a torch.Tensor.\n\n Pad, numericalize, and postprocess a batch and create a tensor.\n\n Args:\n batch (list(object)): A list of object from a batch of examples.\n Returns:\n data (torch.autograd.Varaible): Processed object given the input\n and custom postprocessing Pipeline.\n \"\"\"\n per_count = 4\n ret = []\n for i in range(per_count):\n # per = [(' '.join(sample)).strip().split('</s>')[i].strip().split(' ') for sample in batch]\n\n per = []\n for sample in batch:\n tmp = (' '.join(sample)).strip().split('</s>')\n if i < len(tmp):\n per.append(tmp[i].strip().split(' '))\n else:\n per.append(tmp[len(tmp)-1].strip().split(' '))\n\n padded = self.pad(per)\n per_tensor = self.numericalize(padded, device=device, train=train)\n ret.append(per_tensor)\n return ret\n\n def pad(self, minibatch):\n \"\"\"Pad a batch of examples using this field.\n\n Pads to self.fix_length if provided, otherwise pads to the length of\n the longest example in the batch. Prepends self.init_token and appends\n self.eos_token if those attributes are not None. Returns a tuple of the\n padded list and a list containing lengths of each example if\n `self.include_lengths` is `True` and `self.sequential` is `True`, else just\n returns the padded list. 
If `self.sequential` is `False`, no padding is applied.\n \"\"\"\n minibatch = list(minibatch)\n if not self.sequential:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n if self.pad_first:\n padded.append(\n [self.pad_token] * max(0, max_len - len(x)) +\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]))\n else:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded\n\n\n def numericalize(self, arr, device=None, train=True):\n \"\"\"Turn a batch of examples that use this field into a Variable.\n\n If the field has include_lengths=True, a tensor of lengths will be\n included in the return value.\n\n Arguments:\n arr (List[List[str]], or tuple of (List[List[str]], List[int])):\n List of tokenized and padded examples, or tuple of List of\n tokenized and padded examples and List of lengths of each\n example if self.include_lengths is True.\n device (-1 or None): Device to create the Variable's Tensor on.\n Use -1 for CPU and None for the currently active GPU device.\n Default: None.\n train (boolean): Whether the batch is for a training set.\n If False, the Variable will be created with volatile=True.\n Default: True.\n \"\"\"\n if self.include_lengths and not isinstance(arr, tuple):\n raise ValueError(\"Field has include_lengths set to True, but \"\n \"input data is not a tuple of \"\n \"(data batch, batch lengths).\")\n if isinstance(arr, tuple):\n arr, lengths = arr\n lengths = torch.LongTensor(lengths)\n\n if self.use_vocab:\n if self.sequential:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, self.vocab, train)\n else:\n if self.tensor_type not in self.tensor_types:\n raise ValueError(\n \"Specified Field tensor_type {} can not be used with \"\n \"use_vocab=False because we do not know how to numericalize it. 
\"\n \"Please raise an issue at \"\n \"https://github.com/pytorch/text/issues\".format(self.tensor_type))\n numericalization_func = self.tensor_types[self.tensor_type]\n # It doesn't make sense to explictly coerce to a numeric type if\n # the data is sequential, since it's unclear how to coerce padding tokens\n # to a numeric type.\n if not self.sequential:\n arr = [numericalization_func(x) if isinstance(x, six.string_types)\n else x for x in arr]\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, None, train)\n\n arr = self.tensor_type(arr)\n if self.sequential and not self.batch_first:\n arr.t_()\n if device == -1:\n if self.sequential:\n arr = arr.contiguous()\n else:\n arr = arr.cuda(device)\n if self.include_lengths:\n lengths = lengths.cuda(device)\n if self.include_lengths:\n return Variable(arr, volatile=not train), lengths\n return Variable(arr, volatile=not train)\n\n\nclass NliField(Field):\n\n def process(self, batch, device, train):\n \"\"\" Process a list of examples to create a torch.Tensor.\n\n Pad, numericalize, and postprocess a batch and create a tensor.\n\n Args:\n batch (list(object)): A list of object from a batch of examples.\n Returns:\n data (torch.autograd.Varaible): Processed object given the input\n and custom postprocessing Pipeline.\n \"\"\"\n nli_count = 3\n ret = []\n for i in range(nli_count):\n nli = [(' '.join(sample)).strip().split('</s>')[i].strip().split(' ') for sample in batch]\n padded = self.pad(nli)\n nli_tensor = self.numericalize(padded, i, device=device, train=train)\n ret.append(nli_tensor)\n return ret\n\n def pad(self, minibatch):\n minibatch = list(minibatch)\n if not self.sequential:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n if self.pad_first:\n padded.append(\n [self.pad_token] * max(0, max_len - len(x)) +\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]))\n else:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded\n\n def numericalize(self, arr, nli_count, device=None, train=True):\n if self.include_lengths and not isinstance(arr, tuple):\n raise ValueError(\"Field has include_lengths set to True, but \"\n \"input data is not a tuple of \"\n \"(data batch, batch lengths).\")\n if isinstance(arr, tuple):\n arr, lengths = arr\n lengths = torch.LongTensor(lengths)\n\n if self.use_vocab:\n if nli_count == 2:\n tmp = []\n if self.sequential:\n for ex in arr:\n for x in ex:\n if x == \"negative\":\n tmp += [[1, 0, 0]]\n elif x == \"neutral\":\n tmp += [[0, 1, 0]]\n elif x == \"positive\":\n tmp += [[0, 0, 1]]\n else:\n tmp += [[0, 0, 0]]\n arr = tmp\n else:\n for x in arr:\n if x == \"negative\":\n tmp += [[1, 0, 0]]\n elif x == \"neutral\":\n tmp += [[0, 1, 0]]\n elif x == \"positive\":\n tmp += [[0, 0, 1]]\n else:\n tmp += [[0, 0, 0]]\n arr = tmp\n else:\n if self.sequential:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, self.vocab, train)\n 
else:\n if self.tensor_type not in self.tensor_types:\n raise ValueError(\n \"Specified Field tensor_type {} can not be used with \"\n \"use_vocab=False because we do not know how to numericalize it. \"\n \"Please raise an issue at \"\n \"https://github.com/pytorch/text/issues\".format(self.tensor_type))\n numericalization_func = self.tensor_types[self.tensor_type]\n # It doesn't make sense to explictly coerce to a numeric type if\n # the data is sequential, since it's unclear how to coerce padding tokens\n # to a numeric type.\n if not self.sequential:\n arr = [numericalization_func(x) if isinstance(x, six.string_types)\n else x for x in arr]\n if self.postprocessing is not None:\n arr = self.postprocessing(arr, None, train)\n\n arr = self.tensor_type(arr)\n if self.sequential and not self.batch_first:\n arr.t_()\n if device == -1:\n if self.sequential:\n arr = arr.contiguous()\n else:\n arr = arr.cuda(device)\n if self.include_lengths:\n lengths = lengths.cuda(device)\n if self.include_lengths:\n return Variable(arr, volatile=not train), lengths\n return Variable(arr, volatile=not train)" ]
[ [ "torch.LongTensor", "torch.nn.functional.Variable" ] ]
MCZhi/SMARTS
[ "3ef5650b04ac6fb7145cf4e23d5534d73e0929fc" ]
[ "zoo/policies/cross-rl-agent/cross_rl_agent/train/run_test.py" ]
[ "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# The author of this file is: https://github.com/mg2015started\n\n# The following test was modified from examples/ray_multi_instance.py\n\nimport argparse\nimport logging\nimport warnings\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom ac_network import ActorNetwork, CriticNetwork\nfrom adapters import (\n action_adapter,\n cross_interface,\n get_aux_info,\n observation_adapter,\n reward_adapter,\n)\nfrom config import HyperParameters\nfrom soc_mt_ac_network import SocMtActorNetwork, SocMtCriticNetwork\n\nfrom smarts.core.utils.episodes import episodes\nfrom smarts.zoo.agent_spec import AgentSpec\n\nwarnings.filterwarnings(\"ignore\")\n\nlogging.basicConfig(level=logging.INFO)\n\nAGENT_ID = \"Agent-007\"\nWITH_SOC_MT = True\n\n\ndef init_tensorflow():\n configProto = tf.compat.v1.ConfigProto()\n configProto.gpu_options.allow_growth = True\n # reset tensorflow graph\n tf.compat.v1.reset_default_graph()\n return configProto\n\n\ndef test(test_scenarios, sim_name, headless, num_episodes, seed):\n config = HyperParameters()\n configProto = init_tensorflow()\n # init env\n agent_spec = AgentSpec(\n # you can custom AgentInterface to control what obs information you need and the action type\n interface=cross_interface,\n # agent_builder=actor,\n # you can custom your observation adapter, reward adapter, info adapter, action adapter and so on.\n observation_adapter=observation_adapter,\n reward_adapter=reward_adapter,\n action_adapter=action_adapter,\n )\n\n env = gym.make(\n \"smarts.env:hiway-v0\",\n scenarios=test_scenarios,\n agent_specs={AGENT_ID: agent_spec},\n sim_name=sim_name,\n headless=headless,\n timestep_sec=0.1,\n seed=seed,\n )\n # init nets structure\n if WITH_SOC_MT:\n model_name = \"Soc_Mt_TD3Network\"\n actor = SocMtActorNetwork(name=\"actor\")\n critic_1 = SocMtCriticNetwork(name=\"critic_1\")\n critic_2 = SocMtCriticNetwork(name=\"critic_2\")\n else:\n model_name = \"TD3Network\"\n actor = ActorNetwork(name=\"actor\")\n critic_1 = CriticNetwork(name=\"critic_1\")\n critic_2 = CriticNetwork(name=\"critic_2\")\n saver = tf.compat.v1.train.Saver()\n with tf.compat.v1.Session(config=configProto) as sess:\n # load network\n saver = tf.compat.v1.train.import_meta_graph(\n \"models/\" + model_name + \".ckpt\" + \".meta\"\n )\n saver.restore(sess, \"models/\" + model_name + \".ckpt\")\n if saver is None:\n print(\"did not load\")\n\n 
# init testing params\n test_num = 100\n test_ep = 0\n # results record\n success = 0\n failure = 0\n passed_case = 0\n\n collision = 0\n trouble_collision = 0\n time_exceed = 0\n episode_time_record = []\n\n # start testing\n for episode in episodes(n=num_episodes):\n episode_reward = 0\n env_steps = 0 # step in one episode\n observations = env.reset() # states of all vehs\n state = observations[AGENT_ID] # ego state\n episode.record_scenario(env.scenario_log)\n dones = {\"__all__\": False}\n while not dones[\"__all__\"]:\n action = actor.get_action_noise(sess, state, rate=-1)\n observations, rewards, dones, infos = env.step(\n {AGENT_ID: action}\n ) # states of all vehs in next step\n\n # ego state in next step\n state = observations[AGENT_ID]\n if WITH_SOC_MT:\n reward = rewards[AGENT_ID]\n else:\n reward = np.sum(rewards.values())\n done = dones[AGENT_ID]\n info = infos[AGENT_ID]\n aux_info = get_aux_info(infos[AGENT_ID][\"env_obs\"])\n episode.record_step(observations, rewards, dones, infos)\n if WITH_SOC_MT:\n episode_reward += np.sum(reward)\n else:\n episode_reward += reward\n env_steps += 1\n\n if done:\n test_ep += 1\n # record result\n if aux_info == \"collision\":\n collision += 1\n failure += 1\n elif aux_info == \"trouble_collision\":\n trouble_collision += 1\n passed_case += 1\n elif aux_info == \"time_exceed\":\n time_exceed += 1\n failure += 1\n else:\n # get episode time\n episode_time_record.append(env_steps * 0.1)\n success += 1\n # print\n print(\n episode.index,\n \"EPISODE ended\",\n \"TOTAL REWARD {:.4f}\".format(episode_reward),\n \"Result:\",\n aux_info,\n )\n print(\"total step of this episode: \", env_steps)\n episode_reward = 0\n env_steps = 0\n observations = env.reset() # states of all vehs\n state = observations[AGENT_ID] # ego state\n env.close()\n\n print(\"-*\" * 15, \" result \", \"-*\" * 15)\n print(\"success: \", success, \"/\", test_num)\n print(\"collision: \", collision, \"/\", test_num)\n print(\"time_exceed: \", time_exceed, \"/\", test_num)\n print(\"passed_case: \", passed_case, \"/\", test_num)\n print(\"average time: \", np.mean(episode_time_record))\n\n\ndef main(\n test_scenarios,\n sim_name,\n headless,\n num_episodes,\n seed,\n):\n test(\n test_scenarios,\n sim_name,\n headless,\n num_episodes,\n seed,\n )\n\n\ndef default_argument_parser(program: str):\n \"\"\"This factory method returns a vanilla `argparse.ArgumentParser` with the\n minimum subset of arguments that should be supported.\n\n You can extend it with more `parser.add_argument(...)` calls or obtain the\n arguments via `parser.parse_args()`.\n \"\"\"\n parser = argparse.ArgumentParser(program)\n parser.add_argument(\n \"scenarios\",\n help=\"A list of scenarios. 
Each element can be either the scenario to run \"\n \"(see scenarios/ for some samples you can use) OR a directory of scenarios \"\n \"to sample from.\",\n type=str,\n nargs=\"+\",\n )\n parser.add_argument(\n \"--sim-name\",\n help=\"a string that gives this simulation a name.\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"--headless\", help=\"Run the simulation in headless mode.\", action=\"store_true\"\n )\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\n \"--sumo-port\", help=\"Run SUMO with a specified port.\", type=int, default=None\n )\n parser.add_argument(\n \"--episodes\",\n help=\"The number of episodes to run the simulation for.\",\n type=int,\n default=100,\n )\n return parser\n\n\nif __name__ == \"__main__\":\n parser = default_argument_parser(\"pytorch-example\")\n args = parser.parse_args()\n\n main(\n test_scenarios=args.scenarios,\n sim_name=args.sim_name,\n headless=args.headless,\n num_episodes=args.episodes,\n seed=args.seed,\n )\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.train.import_meta_graph", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.reset_default_graph", "numpy.mean", "tensorflow.compat.v1.train.Saver", "numpy.sum" ] ]
pesavent/SMC
[ "8e53f73b81798761ef3aad3dc94e550eb83a3d9b" ]
[ "SMC_V3_11302020.py" ]
[ "# This python script was written by a simple biochemist with little formal programming training. Is the code great?\n# Nope, but it is indeed functional. The program was designed to accomplish a few quick\n# tasks, namely: how many ms2 events occur for a given precursors mass, summing ms2 ions for a given precursor \n# mass, and identifying possible positional isomers along with their respective ratios. As Histone Jim, many \n# of my thought processes started from a TDMS perspective with an analysis of histone proteins in mind. Also, I wrote \n# this code to integrate with software platforms that are freely available (MASH Explorer, TopPIC, etc.). If\n# you'd like to modify this code for your needs, please do so! If you have any questions about it, you can reach\n# me at [email protected]. \n\n# If using pyinstaller to make windows and mac executable programs, you must downgrade\n# matplotlib to version 3.1.0 or else it bugs out.\n\nimport numpy as np\nimport tkinter as tk\nimport time, sys\nimport matplotlib\n#import matplotlib.pyplot as plt\nimport matplotlib.backends._tkagg\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import style\nstyle.use('ggplot')\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nfrom operator import itemgetter, attrgetter\nfrom datetime import datetime\nfrom tkinter import filedialog as fd\nfrom tkinter import messagebox as mb\nfrom tkinter import ttk\nfrom tkinter import *\nfrom collections.abc import Iterable\n\n\ns_fields = 'Precursor Monoiso. Mass (Da)', 'Start Scan', 'End Scan', 'Mass Tolerance (Da)', 'Histogram Bin Size'\ni_fields = 'PTM Isomer Mass (Da)', 'Isomer Mass Tolerance (Da)'\nfile_types = {'MSALIGN','CSV', 'TDValidator CSV'}\nprecursor_ions = []\nentries = []\ni_ents = []\nfilename = 'temp'\n\nclass App(object):\n\n def __init__(self,master):\n root.geometry(\"650x700\")\n root.wm_title(\"Search MS1 and Combine MS2 [SMC]\")\n self.content = tk.Frame(root, padx=2, pady=2)\n self.content.grid(column=0, row=0, sticky='nsew') \n self.file_var = StringVar(root)\n self.file_var.set('Select Type/File') \n self.popupMenu = OptionMenu(self.content, self.file_var, *file_types)\n Label(self.content, text=\"Select data type and open file:\").grid(row = 0, column = 0)\n self.popupMenu.grid(row = 0, column =1)\n self.file_var.trace('w',self.makeform)\n\n #menubar = Menu(root)\n #testmenu = Menu(menubar, tearoff=0)\n #testmenu.add_command(label=\"Open\")\n #menubar.add_cascade(label=\"File\", menu=testmenu)\n #root.config(menu=menubar)\n\n self.test_isomers = IntVar()\n self.isomer_btn = tk.Checkbutton(self.content, text=\"dMass/Isomer Search (Optional)\", variable=self.test_isomers)\n\n self.b1 = tk.Button(self.content, text='Run', fg=\"green\", command=(lambda : self.fetch()))\n self.b2 = tk.Button(self.content, text='Quit', command=root.destroy)\n self.b2.grid(row=6,column=0)\n self.e=StringVar()\n self.e.set(\" \")\n self.loading = tk.Label(self.content, textvariable=self.e)\n self.loading.grid(row=8, columnspan=4, sticky=\"ew\")\n \n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n self.content.columnconfigure(0, weight=2)\n self.content.columnconfigure(1, weight=2) \n\n \n def clear(self): \n Label(self.content, text=\"Select data type and open file:\").grid(row = 0, column = 0)\n self.popupMenu.grid(row = 0, column =1)\n self.e=StringVar()\n self.e.set(\" \")\n self.loading = tk.Label(self.content, textvariable=self.e)\n self.loading.grid(row=8, columnspan=4, 
sticky=\"ew\")\n \n #root.columnconfigure(0, weight=1)\n #root.rowconfigure(0, weight=1)\n #self.content.columnconfigure(0, weight=2)\n #self.content.columnconfigure(1, weight=2)\n \n \n def update_progress(self,progress):\n barLength = 15 # Modify this to change the length of the progress bar\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength*progress))\n progresstext = \"\\rLoading (%): [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), int(progress*100), status)\n self.e.set(progresstext)\n #sys.stdout.write(progresstext)\n \n def display_output(self,filenames):\n outputframe = tk.Tk()\n outputframe.geometry(\"800x500\")\n outputframe.wm_title(\"SMC Results\")\n j=0\n\n for i in filenames:\n with open(i, \"r\") as f:\n data = f.read()\n data = data.replace(\"{}\", \"\")\n l = tk.Label(outputframe,\n text=\"Data from file name:\\n\"+i,\n font = 'Times 12',\n fg = 'green',\n bd = 1,\n relief = 'solid')\n t = tk.Text(outputframe,\n font = 'Times 10')\n t.insert(tk.END, data)\n t.grid(column=j,row=1,padx=3,sticky='news')\n l.grid(column=j,row=0)\n scrollb = ttk.Scrollbar(outputframe, command=t.yview)\n scrollb.grid(row=1, column=j+1, sticky='news')\n t['yscrollcommand'] = scrollb.set\n outputframe.columnconfigure(j, weight=1)\n outputframe.columnconfigure(j+1,minsize=50, weight=1)\n outputframe.rowconfigure(j+1, weight=1)\n j+=2\n\n\n #remove all characters from line function\n def rmv_chars(self,string):\n getVals = list([val for val in string \n if (val.isnumeric() or val==\".\")]) \n\n return \"\".join(getVals)\n\n\n def combine_ms2(self,array, mass, scanfirst, scanlast, tolerance):\n sent_array = np.array(array, dtype=object)\n new_array = []\n temp = []\n temp = np.array(temp,dtype=object)\n sorted_array = []\n sorted_array= np.array(sorted_array, dtype=object)\n\n #Find and store the number of activation events\n for i in range(len(array)):\n temp = np.append(temp, array[i,3])\n activation_types = np.unique(temp)\n\n if (len(sent_array)) == 0:\n return mb.showerror(\"No precursor ions within\",tolerance,\"Da of\",mass,\"to combine.\")\n\n #filter based off search criteria\n for i in range(len(sent_array)):\n if (sent_array[i][6] < (mass+tolerance)) and (sent_array[i][6] > (mass-tolerance)) and (sent_array[i][1] >= scanfirst) and (sent_array[i][1] <= scanlast):\n new_array.extend((sent_array[i][3],sent_array[i][6],sent_array[i][7],sent_array[i][1]))\n new_array = np.reshape(new_array,(int(len(new_array)/4),4))\n #new_array.dtype = object\n\n #sort by activation\n #final sort array is organized as [activation, precursor mass, [ms2 info],scan number]\n for i in activation_types:\n for j in range(len(new_array)):\n if i == new_array[j,0]:\n sorted_array = np.append(sorted_array,new_array[j])\n sorted_array = np.reshape(sorted_array,(int(len(sorted_array)/4),4))\n\n return sorted_array\n\n\n #This function takes an activation type-sorted array of [Activation type, Prec Mass, [MS2 ion info],Scan number] \n #and generates an output file per activation type. 
This file includes the searched mass, scans that were summed,\n #and the mass of each fragment ion from MS2 data from the searched, parameterized precursor mass.\n def output_file(self,sorted_array, mass):\n ms2_ions = []\n temp_ions = []\n date = datetime.today().strftime('%Y%m%d_%H%M%S')\n filenames = []\n a = []\n\n if (len(sorted_array)) == 0:\n mb.showerror(\"Warning\", \"Based on your search criteria, no results were generated.\")\n self.loading.destroy()\n else:\n activation = str(sorted_array[0][0])\n while len(sorted_array) != 0:\n ms2_ions.append(activation)\n filename = str(int(mass))+\"_\"+(activation).replace('ACTIVATION=','')+\"_\"+date+\".txt\"\n with open(filename,\"a\") as out_file:\n filenames.append(filename) \n out_file.write(activation+\"\\nSearched Mass=\"+str(mass)+\"\\nScans summed: \")\n for i in sorted_array:\n if str(i[0]) == activation:\n out_file.write(str(i[3])+\", \")\n out_file.write(\" \\nPrecursor ions selected:\")\n for i in sorted_array:\n if str(i[0]) == activation:\n out_file.write(str(i[1])+\", \")\n out_file.write(\" \\n\")\n for i in sorted_array:\n if str(sorted_array[0][0]) != activation:\n activation = str(sorted_array[0][0])\n break\n if i[0] == activation:\n if isinstance(i[2],Iterable):\n for j in i[2]:\n out_file.write(\"\\n\"+str(j[0]))\n temp_ions.append(j)\n a.append(temp_ions)\n sorted_array = np.delete(sorted_array,0,0)\n temp_ions = []\n ms2_ions.append(a)\n a = []\n\n #Do isomer search. ms2_ions has the following structure [((activation),([([a,b,c],)],),] where\n # a,b,c is the monoisotopic mass, intensity and charge of an ion within a single scan. \n # ms2_ions[1] is therefore a collection of all returned scans for a searched precursor mass from an\n # activation type (in ms2_ions[0]). For example, ms2_ions[1][0] would\n # have all the the ions for a single scan and ms2_ions[1][0][0] would have information on just\n # one ion from one scan from one activation type.\n ms2_ions = np.array(ms2_ions, dtype=object)\n if self.test_isomers.get() == 1:\n self.isomer_search(ms2_ions,mass) \n\n self.display_output(filenames) \n mb.showinfo(\"Run Completed\", \"The run has completed and generated an output file(s).\")\n self.loading.destroy()\n\n \n def makegraph(self,precursors, hist_bin_size):\n font = {'family' : 'DejaVu Sans','weight' : 'bold','size' : 12}\n\n bin_size = int(hist_bin_size.get())\n\n matplotlib.rc('font', **font)\n f = Figure(figsize=(1,4), dpi=100, tight_layout=True)\n a = f.add_subplot()\n a.hist(precursors, bin_size, density=False, facecolor='g', alpha=0.75)\n\n hist_bin_size.bind('<Return>', (lambda event: self.makegraph(precursors, hist_bin_size)))\n\n a.set_xlabel('Precursor Mass (Da)')\n a.set_ylabel('Number of MS2 Events')\n a.set_title('Histogram of Precursor Masses')\n\n canvas = FigureCanvasTkAgg(f, self.content)\n canvas.draw()\n canvas_widget = canvas.get_tk_widget()\n canvas_widget.grid(row=8, columnspan=4, sticky='news')\n\n toolbar_frame = tk.Frame(self.content) \n toolbar_frame.grid(row=7, columnspan=4) \n toolbar = NavigationToolbar2Tk(canvas, toolbar_frame)\n \n return\n\n\n def fetch(self):\n data = []\n data_isomers = []\n filetype = self.file_var.get()\n error = False\n\n if filetype == 'MSALIGN': \n for entry in entries: \n if entry[1].get() == \"\":\n error = self.callback(\"fail\", entry[0])\n elif float(entry[1].get()) > 100 and entry[0] == \"Mass Tolerance (Da)\":\n mb.showerror(\"Warning\", \"Mass Tolerance has to be less than 100 Da.\")\n error = True\n\n if filetype == 'CSV':\n for entry in 
i_ents: \n if entry[1].get() == \"\":\n error = self.callback(\"fail\", entry[0])\n if error:\n return\n else:\n self.process_CSV()\n \n if error:\n return\n\n else: \n for entry in entries:\n data.append(float(entry[1].get()))\n\n if filetype == \"MSALIGN\":\n self.callback(\"data\", data)\n\n\n def callback(self,param, data):\n self.clear()\n \n if param == \"MSALIGN\":\n self.b1.grid(row=6,column=1)\n scan = []\n entries[3][1].insert(0,1)\n entries[4][1].insert(0,1000)\n\n self.makegraph(self.process(data), entries[4][1])\n\n #Autopopulate scan range (based on loaded MSALIGN file) and mass tolerance \n for scans in precursor_ions:\n scan.append(scans[1])\n entries[1][1].insert(0,np.amin(scan))\n entries[2][1].insert(0,np.amax(scan))\n\n if param == \"CSV\":\n self.b1.grid(row=6,column=1)\n self.test_isomers.set(1)\n\n if param == \"fail\":\n mb.showerror(\"Warning\", \"Error: the \"+str(data)+\" field is empty.\")\n return True\n\n if param == \"data\":\n sorted_array = self.combine_ms2(precursor_ions, data[0], data[1], data[2], data[3])\n self.output_file(sorted_array, data[0])\n\n\n def makeform(self,*args):\n global entries\n global i_ents\n global filename\n global precursor_ions\n \n precursor_ions = []\n entries = []\n i_ents = []\n i=1\n \n filetype = self.file_var.get()\n \n if filetype == 'MSALIGN':\n extension = '.msalign'\n else:\n extension = '.csv'\n \n self.isomer_btn.grid(row=0,column=2, columnspan=2, sticky='news')\n \n filename = fd.askopenfilename(filetypes=[(filetype,extension),('All files','*.*')],\n title='Please select the '+filetype+' MS data file')\n\n\n if filetype == 'MSALIGN':\n for field in s_fields:\n lab = tk.Label(self.content, width=20, text=field, anchor='w')\n ent = tk.Entry(self.content, width=8)\n lab.grid(row=i, column=0)\n ent.grid(row=i, column=1)\n entries.append((field, ent))\n i+=1\n i=1\n\n for field in i_fields:\n lab = tk.Label(self.content, width=20, text=field, anchor='w')\n ent = tk.Entry(self.content, width=8)\n lab.grid(row=i, column=2)\n ent.grid(row=i, column=3)\n i_ents.append((field, ent))\n i+=1 \n \n if filetype == 'TDValidator CSV':\n search_params = fd.askopenfilename(filetypes=[('Search parameter','.csv')],title='Please select the isomer criteria file')\n if search_params == \"\":\n self.makeform\n else:\n self.tdvalidator_search(search_params)\n \n if filename == \"\":\n print('no file selected')\n self.makeform\n else:\n self.callback(filetype, filename)\n\n\n def process(self,name):\n global precursor_ions\n precursor_ions = []\n msfile = name\n temp_array = [0,0,0,0,0,0,0,0]\n precursors = []\n ms2_ions = []\n ms2_ions = np.array(ms2_ions)\n ms2events = 0\n counter = 0\n convert = []\n lines = []\n \n with open(msfile) as fp:\n data = fp.read()\n\n for i in data:\n if i == ',' or i == '\\n':\n link = \"\".join(convert)\n link.strip()\n if len(link) > 1:\n lines.append(link)\n convert = []\n else:\n convert.append(i)\n progress_bar_lines = len(lines)\n \n \n while (len(lines)-1)>0:\n text = str(lines[0])\n if text.startswith('ID='):\n temp_array[0] = int(self.rmv_chars(text))\n counter+=1\n self.update_progress((1-(len(lines)/progress_bar_lines))) #do progress update here b/c of time\n self.loading.configure(textvariable=self.e)\n root.update()\n elif text.startswith('SCANS='):\n temp_array[1] = int(self.rmv_chars(text))\n counter+=1\n elif text.startswith('RETENTION_TIME='):\n temp_array[2] = float(self.rmv_chars(text))\n counter+=1\n elif text.startswith('ACTIVATION='):\n temp_array[3] = str(text)\n counter+=1\n elif 
text.startswith('PRECURSOR_MZ='):\n temp_array[4] = float(self.rmv_chars(text))\n counter+=1\n elif text.startswith('PRECURSOR_CHARGE='):\n temp_array[5] = int(self.rmv_chars(text))\n counter+=1\n elif text.startswith('PRECURSOR_MASS='):\n temp_array[6] = float(self.rmv_chars(text))\n counter+=1 \n elif text[0].isdigit():\n counter+=1\n while lines[0]!='END IONS':\n ms2_ions = np.append(ms2_ions,[float(s) for s in lines[0].split(\"\\t\")])\n del lines[0]\n ms2_ions = np.reshape(ms2_ions,(int(len(ms2_ions)/3),3))\n temp_array[7] = ms2_ions\n precursor_ions.append(temp_array)\n temp_array = [0,0,0,0,0,0,0,0]\n ms2_ions = []\n ms2events+=1\n del lines[0]\n\n self.loading.destroy()\n \n #This is very important: Here is the final format of the np array:\n #For each precursor ion, the format is [ID,SCANS,RETENTION_TIME,ACTIVATION,PRECURSOR_MZ,PRECURSOR_CHARGE,PRECURSOR_MASS,[ARRAY OF MS2 IONS]]\n #The array of MS2 ions is [Mass,Intensity,Charge]\n precursor_ions = np.array(precursor_ions, dtype=object)\n\n #Autopopulate Precursor Mass if the MSALIGN file has only a single precursor ion (i.e., Thrash deconv from Mash Explorer)\n if ms2events == 1:\n entries[0][1].insert(0,precursor_ions[0][6])\n\n for i in precursor_ions:\n precursors.append(i[6])\n\n precursors = np.array(precursors)\n root.wm_title(name)\n return(precursors)\n\n def process_CSV(self):\n mass = 0\n activation = 'ACTIVATION=CSV'\n msfile = filename\n convert = []\n pool = []\n scan_ions = []\n organized_CSV = []\n j=0\n\n #input eThrash or csv file and convert it to string list\n with open(msfile) as fp:\n data = fp.read()\n\n for i in data:\n if i == ',' or i == '\\n':\n link = \"\".join(convert)\n pool.append(link)\n convert = []\n else:\n convert.append(i)\n pool = np.array(pool, dtype=object)\n pool = np.delete(pool,0,0)\n\n while len(pool) > 0:\n if pool[j] == \"\":\n pool = np.delete(pool,0,0)\n elif pool[j][0].isnumeric():\n a = [float(pool[j]),float(pool[j+1]),int(pool[j+2])]\n scan_ions.append(a)\n pool = np.delete(pool,[0,1,2],None)\n else:\n pool = np.delete(pool,0,0)\n organized_CSV.append(scan_ions)\n scan_ions = []\n a=[]\n\n organized_CSV = np.array((activation,organized_CSV), dtype=object)\n self.isomer_search(organized_CSV,mass)\n\n \n #This function takes a TDValidator .csv output file (m/z, intensity) and a user-defined parameter file\n #that is also a .csv file and formatted as follows (amino acid name, amino acid position, PTM name, fragment\n #ion mass INCLUDING PTM!). 
The function will automatically convert the fragment ion + PTM mass into m/z for\n #1+, 2+ and 3+ charge states and search for the monoisoptic peak of each within the TDValidator .csv file.\n def tdvalidator_search(self,search_params):\n pool = []\n convert = []\n matched_fragments = []\n date = datetime.today().strftime('%Y%m%d_%H%M%S')\n chargestates = [1,2,3]\n mz_search_list = []\n match_list = []\n proton = 1.007825\n tolerance_ppm = 20\n \n \n with open(search_params) as pfp:\n data_params = pfp.read()\n \n with open(filename) as dfp:\n data_tdvalidator = np.loadtxt(dfp, dtype=float, delimiter=\",\", unpack=False)\n\n for idx, val in enumerate(data_params):\n if val == ',' or val == '\\n' or idx == (len(data_params)-1):\n link = \"\".join(convert)\n pool.append(link)\n convert = []\n else:\n convert.append(val)\n pool = np.array(pool, dtype=object)\n \n #reshaped parameters is now [(amino acid, position, acetylstate, mass)]\n parameters = np.reshape(pool,(int(len(pool)/4),4))\n \n #convert fragment monoisotopic masses to m/z values based on chosen charge states\n for i in parameters:\n temp = []\n temp.extend((i[0],i[1],i[2]))\n for j in chargestates:\n temp.append((float(i[3])+j*proton)/(j))\n mz_search_list.append(temp)\n\n for j in range(len(chargestates)):\n for i in data_tdvalidator:\n k=0\n observed = i[0]\n while k < len(mz_search_list):\n temp_list = []\n theoretical = mz_search_list[k][j+3]\n delta = abs(theoretical - observed)\n ppm_error = delta/theoretical * (10 ** 6)\n if ppm_error <= tolerance_ppm:\n temp_list.extend((mz_search_list[k][0],int(mz_search_list[k][1]),mz_search_list[k][2],\n theoretical, observed,'z= '+str(chargestates[j]), i[1]))\n match_list.append(temp_list)\n k+=1\n match_list = sorted(match_list, key=itemgetter(1,2))\n \n tdval_filename = 'TDvalidator_out_'+date+\"_search.csv\"\n with open(tdval_filename,\"a\") as out_file:\n out_file.write(\"Residue, Position, PTM, Theoretical Mass, Observed Mass, Charge state, Intensity\")\n for i in match_list:\n line = str(i)[1:-1]\n out_file.write(\"\\n\"+line)\n \n #This function does NOT destinguish between ion types, so be careful! It is a preliminary isomer/deltaM\n #search tool that needs to be followed up with rigourous manual analysis. 
\n def isomer_search(self,ms2_ions,mass):\n date = datetime.today().strftime('%Y%m%d_%H%M%S')\n matched_fragments = []\n massdiff, tolerance = float(i_ents[0][1].get()),float(i_ents[1][1].get())\n header = [\"Mass 1\", \"Mass 2\", \"DeltaMass\",\"Intensity ratio [mass 1/mass 2]\"]\n\n for activation_type in range(0,len(ms2_ions),2):\n filename = str(int(mass))+\"_\"+str(ms2_ions[activation_type]).replace('ACTIVATION=','')+\"_\"+date+\"_isomers.txt\"\n with open(filename,\"a\") as out_file:\n for scan in range(len(ms2_ions[activation_type+1])):\n while len(ms2_ions[activation_type+1][scan]) > 0:\n for i in range(len(ms2_ions[activation_type+1][scan])-1):\n fmass = float(ms2_ions[activation_type+1][scan][0][0])\n nmass = float(ms2_ions[activation_type+1][scan][i][0])\n fint = float(ms2_ions[activation_type+1][scan][0][1])\n nint = float(ms2_ions[activation_type+1][scan][i][1])\n fcstate = int(ms2_ions[activation_type+1][scan][0][2]) \n ncstate = int(ms2_ions[activation_type+1][scan][i][2])\n\n if fmass >= nmass:\n test = fmass - nmass\n else:\n test = nmass - fmass\n if (massdiff - tolerance) <= test <= (massdiff + tolerance) and fcstate == ncstate:\n if fmass >= nmass:\n ratio = (fint/(fint+nint)) * 100\n a = [fmass,nmass,fcstate,test,ratio]\n else:\n ratio = (nint/(fint+nint)) * 100\n a = [nmass,fmass,fcstate,test,ratio]\n matched_fragments.append(a)\n ms2_ions[activation_type+1][scan] = np.delete(ms2_ions[activation_type+1][scan],0,0)\n\n matched_fragments = sorted(matched_fragments, key=lambda x: x[0])\n for i in header:\n out_file.write(str(i)+\", \")\n for i in matched_fragments:\n out_file.write(\"\\n\"+str(i))\n matched_fragments = []\n\n mb.showinfo(\"Run Completed\", \"The run has completed and generated an output file(s).\")\n\n\nroot = tk.Tk()\napp = App(root) \nroot.mainloop()\n \n\n" ]
[ [ "numpy.amax", "numpy.unique", "matplotlib.style.use", "matplotlib.use", "matplotlib.figure.Figure", "numpy.amin", "numpy.delete", "numpy.append", "numpy.loadtxt", "matplotlib.backends.backend_tkagg.NavigationToolbar2Tk", "numpy.array", "matplotlib.rc", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
muyuuuu/PyQt-learn
[ "a9134566505c88ae9d46ae23480b140c8c911c33" ]
[ "GUI/Basic-train/QtPandas/qtpandas/models/DataFrameModel.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nEasy integration of DataFrame into pyqt framework\n\n@author: Jev Kuznetsov, Matthias Ludwig - Datalyze Solutions\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom builtins import range\nfrom builtins import super\nfrom builtins import int\nfrom builtins import round\nfrom builtins import str\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom datetime import datetime\nfrom qtpandas.utils import superReadFile\nfrom qtpandas.compat import Qt, QtCore, QtGui, Slot, Signal\n\nimport pandas\nimport numpy\n\nimport parser\nimport re\n\nfrom qtpandas.models.ColumnDtypeModel import ColumnDtypeModel\nfrom qtpandas.models.DataSearch import DataSearch\nfrom qtpandas.models.SupportedDtypes import SupportedDtypes\n\nDATAFRAME_ROLE = Qt.UserRole + 2\n\n\ndef read_file(filepath, **kwargs):\n \"\"\"\n Read a data file into a DataFrameModel.\n\n :param filepath: The rows/columns filepath to read.\n :param kwargs:\n xls/x files - see pandas.read_excel(**kwargs)\n .csv/.txt/etc - see pandas.read_csv(**kwargs)\n :return: DataFrameModel\n \"\"\"\n return DataFrameModel(dataFrame=superReadFile(filepath, **kwargs),\n filePath=filepath)\n\n\ndef read_sql(sql, con, filePath, index_col=None, coerce_float=True,\n params=None, parse_dates=None, columns=None, chunksize=None):\n \"\"\"\n Read SQL query or database table into a DataFrameModel.\n Provide a filePath argument in addition to the *args/**kwargs from\n pandas.read_sql and get a DataFrameModel.\n\n NOTE: The chunksize option is overridden to None always (for now).\n\n Reference:\n http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html\n pandas.read_sql(sql, con, index_col=None, coerce_float=True,\n params=None, parse_dates=None, columns=None, chunksize=None)\n\n\n\n :return: DataFrameModel\n \"\"\"\n\n # TODO: Decide if chunksize is worth keeping and how to handle?\n df = pandas.read_sql(sql, con, index_col, coerce_float,\n params, parse_dates, columns, chunksize=None)\n return DataFrameModel(df, filePath=filePath)\n\n\n\n\n\nclass DataFrameModel(QtCore.QAbstractTableModel):\n \"\"\"data model for use in QTableView, QListView, QComboBox, etc.\n\n Attributes:\n timestampFormat (unicode): formatting string for conversion of timestamps to QtCore.QDateTime.\n Used in data method.\n sortingAboutToStart (QtCore.pyqtSignal): emitted directly before sorting starts.\n sortingFinished (QtCore.pyqtSignal): emitted, when sorting finished.\n dtypeChanged (Signal(columnName)): passed from related ColumnDtypeModel\n if a columns dtype has changed.\n changingDtypeFailed (Signal(columnName, index, dtype)):\n passed from related ColumnDtypeModel.\n emitted after a column has changed it's data type.\n dataChanged (Signal):\n Emitted, if data has changed, e.x. 
finished loading, new columns added or removed.\n It's not the same as layoutChanged.\n Usefull to reset delegates in the view.\n \"\"\"\n\n _float_precisions = {\n \"float16\": numpy.finfo(numpy.float16).precision - 2,\n \"float32\": numpy.finfo(numpy.float32).precision - 1,\n \"float64\": numpy.finfo(numpy.float64).precision - 1\n }\n\n \"\"\"list of int datatypes for easy checking in data() and setData()\"\"\"\n _intDtypes = SupportedDtypes.intTypes() + SupportedDtypes.uintTypes()\n \"\"\"list of float datatypes for easy checking in data() and setData()\"\"\"\n _floatDtypes = SupportedDtypes.floatTypes()\n \"\"\"list of bool datatypes for easy checking in data() and setData()\"\"\"\n _boolDtypes = SupportedDtypes.boolTypes()\n \"\"\"list of datetime datatypes for easy checking in data() and setData()\"\"\"\n _dateDtypes = SupportedDtypes.datetimeTypes()\n\n _timestampFormat = Qt.ISODate\n\n sortingAboutToStart = Signal()\n sortingFinished = Signal()\n dtypeChanged = Signal(int, object)\n changingDtypeFailed = Signal(object, QtCore.QModelIndex, object)\n dataChanged = Signal()\n dataFrameChanged = Signal()\n\n def __init__(self, dataFrame=None, copyDataFrame=False, filePath=None):\n \"\"\"\n\n Args:\n dataFrame (pandas.core.frame.DataFrame, optional): initializes the model with given DataFrame.\n If none is given an empty DataFrame will be set. defaults to None.\n copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.\n If you use it as is, you can change it from outside otherwise you have to reset the dataFrame\n after external changes.\n filePath (str, optional): stores the original path for tracking.\n\n \"\"\"\n super(DataFrameModel, self).__init__()\n\n self._dataFrame = pandas.DataFrame()\n\n if dataFrame is not None:\n self.setDataFrame(dataFrame, copyDataFrame=copyDataFrame)\n\n self.dataChanged.emit()\n\n self._dataFrameOriginal = None\n self._search = DataSearch(\"nothing\", \"\")\n self.editable = False\n self._filePath = filePath\n\n @property\n def filePath(self):\n \"\"\"\n Access to the internal _filepath property (could be None)\n :return: qtpandas.models.DataFrameModel._filepath\n \"\"\"\n return self._filePath\n\n def dataFrame(self):\n \"\"\"\n getter function to _dataFrame. Holds all data.\n\n Note:\n It's not implemented with python properties to keep Qt conventions.\n Not sure why??\n \"\"\"\n return self._dataFrame\n\n def setDataFrameFromFile(self, filepath, **kwargs):\n \"\"\"\n Sets the model's dataFrame by reading a file.\n Accepted file formats:\n - .xlsx (sheet1 is read unless specified in kwargs)\n - .csv (comma separated unless specified in kwargs)\n - .txt (any separator)\n\n :param filepath: (str)\n The path to the file to be read.\n :param kwargs:\n pandas.read_csv(**kwargs) or pandas.read_excel(**kwargs)\n :return: None\n \"\"\"\n df = superReadFile(filepath, **kwargs)\n self.setDataFrame(df, filePath=filepath)\n\n def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):\n \"\"\"\n Setter function to _dataFrame. Holds all data.\n\n Note:\n It's not implemented with python properties to keep Qt conventions.\n\n Raises:\n TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.\n\n Args:\n dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.\n copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. 
defaults to False.\n If you use it as is, you can change it from outside otherwise you have to reset the dataFrame\n after external changes.\n\n \"\"\"\n if not isinstance(dataFrame, pandas.core.frame.DataFrame):\n raise TypeError(\"not of type pandas.core.frame.DataFrame\")\n\n self.layoutAboutToBeChanged.emit()\n if copyDataFrame:\n self._dataFrame = dataFrame.copy()\n else:\n self._dataFrame = dataFrame\n\n self._columnDtypeModel = ColumnDtypeModel(dataFrame)\n self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)\n self._columnDtypeModel.changeFailed.connect(\n lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)\n )\n if filePath is not None:\n self._filePath = filePath\n self.layoutChanged.emit()\n self.dataChanged.emit()\n self.dataFrameChanged.emit()\n\n @Slot(int, object)\n def propagateDtypeChanges(self, column, dtype):\n \"\"\"\n Emits a dtypeChanged signal with the column and dtype.\n\n :param column: (str)\n :param dtype: ??\n :return: None\n \"\"\"\n self.dtypeChanged.emit(column, dtype)\n\n @property\n def timestampFormat(self):\n \"\"\"getter to _timestampFormat\"\"\"\n return self._timestampFormat\n\n @timestampFormat.setter\n def timestampFormat(self, timestampFormat):\n \"\"\"\n Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime\n\n Raises:\n AssertionError: if timestampFormat is not of type unicode.\n\n Args:\n timestampFormat (unicode): assign timestampFormat to _timestampFormat.\n Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.\n\n \"\"\"\n if not isinstance(timestampFormat, str):\n raise TypeError('not of type unicode')\n #assert isinstance(timestampFormat, unicode) or timestampFormat.__class__.__name__ == \"DateFormat\", \"not of type unicode\"\n self._timestampFormat = timestampFormat\n\n def rename(self, index=None, columns=None, **kwargs):\n \"\"\"\n Renames the dataframe inplace calling appropriate signals.\n Wraps pandas.DataFrame.rename(*args, **kwargs) - overrides\n the inplace kwarg setting it to True.\n\n Example use:\n renames = {'colname1':'COLNAME_1', 'colname2':'COL2'}\n DataFrameModel.rename(columns=renames)\n\n :param args:\n see pandas.DataFrame.rename\n :param kwargs:\n see pandas.DataFrame.rename\n :return:\n None\n \"\"\"\n kwargs['inplace'] = True\n self.layoutAboutToBeChanged.emit()\n self._dataFrame.rename(index, columns, **kwargs)\n self.layoutChanged.emit()\n self.dataChanged.emit()\n self.dataFrameChanged.emit()\n\n def applyFunction(self, func):\n \"\"\"\n Applies a function to the dataFrame with appropriate signals.\n The function must return a dataframe.\n :param func: A function (or partial function) that accepts a dataframe as the first argument.\n :return: None\n :raise:\n AssertionError if the func is not callable.\n AssertionError if the func does not return a DataFrame.\n \"\"\"\n assert callable(func), \"function {} is not callable\".format(func)\n self.layoutAboutToBeChanged.emit()\n df = func(self._dataFrame)\n assert isinstance(df, pandas.DataFrame), \"function {} did not return a DataFrame.\".format(func.__name__)\n self._dataFrame = df\n self.layoutChanged.emit()\n self.dataChanged.emit()\n self.dataFrameChanged.emit()\n\n def headerData(self, section, orientation, role=Qt.DisplayRole):\n \"\"\"\n Return the header depending on section, orientation and Qt::ItemDataRole\n\n Args:\n section (int): For horizontal headers, the section number corresponds to the column number.\n Similarly, for 
vertical headers, the section number corresponds to the row number.\n orientation (Qt::Orientations):\n role (Qt::ItemDataRole):\n\n Returns:\n None if not Qt.DisplayRole\n _dataFrame.columns.tolist()[section] if orientation == Qt.Horizontal\n section if orientation == Qt.Vertical\n None if horizontal orientation and section raises IndexError\n \"\"\"\n if role != Qt.DisplayRole:\n return None\n\n if orientation == Qt.Horizontal:\n try:\n label = self._dataFrame.columns.tolist()[section]\n if label == section:\n label = section\n return label\n except (IndexError, ):\n return None\n elif orientation == Qt.Vertical:\n return section\n\n def data(self, index, role=Qt.DisplayRole):\n \"\"\"return data depending on index, Qt::ItemDataRole and data type of the column.\n\n Args:\n index (QtCore.QModelIndex): Index to define column and row you want to return\n role (Qt::ItemDataRole): Define which data you want to return.\n\n Returns:\n None if index is invalid\n None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE\n\n if role DisplayRole:\n unmodified _dataFrame value if column dtype is object (string or unicode).\n _dataFrame value as int or long if column dtype is in _intDtypes.\n _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).\n None if column dtype is in _boolDtypes.\n QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.\n\n if role EditRole:\n unmodified _dataFrame value if column dtype is object (string or unicode).\n _dataFrame value as int or long if column dtype is in _intDtypes.\n _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).\n _dataFrame value as bool if column dtype is in _boolDtypes.\n QDateTime if column dtype is numpy.timestamp64[ns]. 
Uses timestampFormat as conversion template.\n\n if role CheckStateRole:\n Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes.\n\n if role DATAFRAME_ROLE:\n unmodified _dataFrame value.\n\n raises TypeError if an unhandled dtype is found in column.\n \"\"\"\n\n if not index.isValid():\n return None\n\n def convertValue(row, col, columnDtype):\n value = None\n if columnDtype == object:\n value = self._dataFrame.ix[row, col]\n elif columnDtype in self._floatDtypes:\n value = round(float(self._dataFrame.ix[row, col]), self._float_precisions[str(columnDtype)])\n elif columnDtype in self._intDtypes:\n value = int(self._dataFrame.ix[row, col])\n elif columnDtype in self._boolDtypes:\n # TODO this will most likely always be true\n # See: http://stackoverflow.com/a/715455\n # well no: I am mistaken here, the data is already in the dataframe\n # so its already converted to a bool\n value = bool(self._dataFrame.ix[row, col])\n\n elif columnDtype in self._dateDtypes:\n #print numpy.datetime64(self._dataFrame.ix[row, col])\n value = pandas.Timestamp(self._dataFrame.ix[row, col])\n value = QtCore.QDateTime.fromString(str(value), self.timestampFormat)\n #print value\n # else:\n # raise TypeError, \"returning unhandled data type\"\n return value\n\n row = self._dataFrame.index[index.row()]\n col = self._dataFrame.columns[index.column()]\n columnDtype = self._dataFrame[col].dtype\n\n if role == Qt.DisplayRole:\n # return the value if you wanne show True/False as text\n if columnDtype == numpy.bool:\n result = self._dataFrame.ix[row, col]\n else:\n result = convertValue(row, col, columnDtype)\n elif role == Qt.EditRole:\n result = convertValue(row, col, columnDtype)\n elif role == Qt.CheckStateRole:\n if columnDtype == numpy.bool_:\n if convertValue(row, col, columnDtype):\n result = Qt.Checked\n else:\n result = Qt.Unchecked\n else:\n result = None\n elif role == DATAFRAME_ROLE:\n result = self._dataFrame.ix[row, col]\n else:\n result = None\n return result\n\n def flags(self, index):\n \"\"\"Returns the item flags for the given index as ored value, e.x.: Qt.ItemIsUserCheckable | Qt.ItemIsEditable\n\n If a combobox for bool values should pop up ItemIsEditable have to set for bool columns too.\n\n Args:\n index (QtCore.QModelIndex): Index to define column and row\n\n Returns:\n if column dtype is not boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable\n if column dtype is boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable\n \"\"\"\n flags = super(DataFrameModel, self).flags(index)\n\n if not self.editable:\n return flags\n\n col = self._dataFrame.columns[index.column()]\n if self._dataFrame[col].dtype == numpy.bool:\n flags |= Qt.ItemIsUserCheckable\n else:\n # if you want to have a combobox for bool columns set this\n flags |= Qt.ItemIsEditable\n\n return flags\n\n def setData(self, index, value, role=Qt.DisplayRole):\n \"\"\"Set the value to the index position depending on Qt::ItemDataRole and data type of the column\n\n Args:\n index (QtCore.QModelIndex): Index to define column and row.\n value (object): new value.\n role (Qt::ItemDataRole): Use this role to specify what you want to do.\n\n Raises:\n TypeError: If the value could not be converted to a known datatype.\n\n Returns:\n True if value is changed. 
Calls layoutChanged after update.\n False if value is not different from original value.\n\n \"\"\"\n if not index.isValid() or not self.editable:\n return False\n\n if value != index.data(role):\n\n self.layoutAboutToBeChanged.emit()\n\n row = self._dataFrame.index[index.row()]\n col = self._dataFrame.columns[index.column()]\n #print 'before change: ', index.data().toUTC(), self._dataFrame.iloc[row][col]\n columnDtype = self._dataFrame[col].dtype\n\n if columnDtype == object:\n pass\n\n elif columnDtype in self._intDtypes:\n dtypeInfo = numpy.iinfo(columnDtype)\n if value < dtypeInfo.min:\n value = dtypeInfo.min\n elif value > dtypeInfo.max:\n value = dtypeInfo.max\n\n elif columnDtype in self._floatDtypes:\n value = numpy.float64(value).astype(columnDtype)\n\n elif columnDtype in self._boolDtypes:\n value = numpy.bool_(value)\n\n elif columnDtype in self._dateDtypes:\n # convert the given value to a compatible datetime object.\n # if the conversation could not be done, keep the original\n # value.\n if isinstance(value, QtCore.QDateTime):\n value = value.toString(self.timestampFormat)\n try:\n value = pandas.Timestamp(value)\n except Exception:\n raise Exception(\"Can't convert '{0}' into a datetime\".format(value))\n # return False\n else:\n raise TypeError(\"try to set unhandled data type\")\n\n self._dataFrame.set_value(row, col, value)\n\n #print 'after change: ', value, self._dataFrame.iloc[row][col]\n self.layoutChanged.emit()\n return True\n else:\n return False\n\n\n def rowCount(self, index=QtCore.QModelIndex()):\n \"\"\"returns number of rows\n\n Args:\n index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex\n\n Returns:\n number of rows\n \"\"\"\n # len(df.index) is faster, so use it:\n # In [12]: %timeit df.shape[0]\n # 1000000 loops, best of 3: 437 ns per loop\n # In [13]: %timeit len(df.index)\n # 10000000 loops, best of 3: 110 ns per loop\n # %timeit df.__len__()\n # 1000000 loops, best of 3: 215 ns per loop\n return len(self._dataFrame.index)\n\n def columnCount(self, index=QtCore.QModelIndex()):\n \"\"\"returns number of columns\n\n Args:\n index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex\n\n Returns:\n number of columns\n \"\"\"\n # speed comparison:\n # In [23]: %timeit len(df.columns)\n # 10000000 loops, best of 3: 108 ns per loop\n\n # In [24]: %timeit df.shape[1]\n # 1000000 loops, best of 3: 440 ns per loop\n return len(self._dataFrame.columns)\n\n def sort(self, columnId, order=Qt.AscendingOrder):\n \"\"\"\n Sorts the model column\n\n After sorting the data in ascending or descending order, a signal\n `layoutChanged` is emitted.\n\n :param: columnId (int)\n the index of the column to sort on.\n :param: order (Qt::SortOrder, optional)\n descending(1) or ascending(0). 
defaults to Qt.AscendingOrder\n\n \"\"\"\n self.layoutAboutToBeChanged.emit()\n self.sortingAboutToStart.emit()\n column = self._dataFrame.columns[columnId]\n self._dataFrame.sort_values(column, ascending=not bool(order), inplace=True)\n self.layoutChanged.emit()\n self.sortingFinished.emit()\n\n def setFilter(self, search):\n \"\"\"\n Apply a filter and hide rows.\n\n The filter must be a `DataSearch` object, which evaluates a python\n expression.\n If there was an error while parsing the expression, the data will remain\n unfiltered.\n\n Args:\n search(qtpandas.DataSearch): data search object to use.\n\n Raises:\n TypeError: An error is raised, if the given parameter is not a\n `DataSearch` object.\n\n \"\"\"\n if not isinstance(search, DataSearch):\n raise TypeError('The given parameter must an `qtpandas.DataSearch` object')\n\n self._search = search\n\n self.layoutAboutToBeChanged.emit()\n\n if self._dataFrameOriginal is not None:\n self._dataFrame = self._dataFrameOriginal\n self._dataFrameOriginal = self._dataFrame.copy()\n\n self._search.setDataFrame(self._dataFrame)\n searchIndex, valid = self._search.search()\n\n if valid:\n self._dataFrame = self._dataFrame[searchIndex]\n self.layoutChanged.emit()\n else:\n self.clearFilter()\n self.layoutChanged.emit()\n\n self.dataFrameChanged.emit()\n\n def clearFilter(self):\n \"\"\"\n Clear all filters.\n \"\"\"\n if self._dataFrameOriginal is not None:\n self.layoutAboutToBeChanged.emit()\n self._dataFrame = self._dataFrameOriginal\n self._dataFrameOriginal = None\n self.layoutChanged.emit()\n\n def columnDtypeModel(self):\n \"\"\"\n Getter for a ColumnDtypeModel.\n\n :return:\n qtpandas.models.ColumnDtypeModel\n \"\"\"\n return self._columnDtypeModel\n\n\n def enableEditing(self, editable=True):\n \"\"\"\n Sets the DataFrameModel and columnDtypeModel's\n editable properties.\n :param editable: bool\n defaults to True,\n False disables most editing methods.\n :return:\n None\n \"\"\"\n self.editable = editable\n self._columnDtypeModel.setEditable(self.editable)\n\n def dataFrameColumns(self):\n \"\"\"\n :return: list containing dataframe columns\n \"\"\"\n return self._dataFrame.columns.tolist()\n\n def addDataFrameColumn(self, columnName, dtype=str, defaultValue=None):\n \"\"\"\n Adds a column to the dataframe as long as\n the model's editable property is set to True and the\n dtype is supported.\n\n :param columnName: str\n name of the column.\n :param dtype: qtpandas.models.SupportedDtypes option\n :param defaultValue: (object)\n to default the column's value to, should be the same as the dtype or None\n :return: (bool)\n True on success, False otherwise.\n \"\"\"\n if not self.editable or dtype not in SupportedDtypes.allTypes():\n return False\n\n elements = self.rowCount()\n columnPosition = self.columnCount()\n\n newColumn = pandas.Series([defaultValue]*elements, index=self._dataFrame.index, dtype=dtype)\n\n self.beginInsertColumns(QtCore.QModelIndex(), columnPosition - 1, columnPosition - 1)\n try:\n self._dataFrame.insert(columnPosition, columnName, newColumn, allow_duplicates=False)\n except ValueError as e:\n # columnName does already exist\n return False\n\n self.endInsertColumns()\n\n self.propagateDtypeChanges(columnPosition, newColumn.dtype)\n\n return True\n\n def addDataFrameRows(self, count=1):\n \"\"\"\n\n Adds rows to the dataframe.\n\n :param count: (int)\n The number of rows to add to the dataframe.\n :return: (bool)\n True on success, False on failure.\n\n \"\"\"\n # don't allow any gaps in the data rows.\n # and 
always append at the end\n\n if not self.editable:\n return False\n\n position = self.rowCount()\n\n if count < 1:\n return False\n\n if len(self.dataFrame().columns) == 0:\n # log an error message or warning\n return False\n\n # Note: This function emits the rowsAboutToBeInserted() signal which\n # connected views (or proxies) must handle before the data is\n # inserted. Otherwise, the views may end up in an invalid state.\n self.beginInsertRows(QtCore.QModelIndex(), position, position + count - 1)\n\n defaultValues = []\n for dtype in self._dataFrame.dtypes:\n if dtype.type == numpy.dtype('<M8[ns]'):\n val = pandas.Timestamp('')\n elif dtype.type == numpy.dtype(object):\n val = ''\n else:\n val = dtype.type()\n defaultValues.append(val)\n\n for i in range(count):\n self._dataFrame.loc[position + i] = defaultValues\n self._dataFrame.reset_index()\n self.endInsertRows()\n return True\n\n def removeDataFrameColumns(self, columns):\n \"\"\"\n Removes columns from the dataframe.\n :param columns: [(int, str)]\n :return: (bool)\n True on success, False on failure.\n \"\"\"\n if not self.editable:\n return False\n\n if columns:\n deleted = 0\n errored = False\n for (position, name) in columns:\n position = position - deleted\n if position < 0:\n position = 0\n self.beginRemoveColumns(QtCore.QModelIndex(), position, position)\n try:\n self._dataFrame.drop(name, axis=1, inplace=True)\n except ValueError as e:\n errored = True\n continue\n self.endRemoveColumns()\n deleted += 1\n self.dataChanged.emit()\n\n if errored:\n return False\n else:\n return True\n return False\n\n def removeDataFrameRows(self, rows):\n \"\"\"\n Removes rows from the dataframe.\n\n :param rows: (list)\n of row indexes to removes.\n :return: (bool)\n True on success, False on failure.\n \"\"\"\n if not self.editable:\n return False\n\n if rows:\n position = min(rows)\n count = len(rows)\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + count - 1)\n\n removedAny = False\n for idx, line in self._dataFrame.iterrows():\n if idx in rows:\n removedAny = True\n self._dataFrame.drop(idx, inplace=True)\n\n if not removedAny:\n return False\n\n self._dataFrame.reset_index(inplace=True, drop=True)\n\n self.endRemoveRows()\n return True\n return False\n" ]
[ [ "pandas.Series", "numpy.dtype", "numpy.finfo", "pandas.DataFrame", "numpy.iinfo", "numpy.float64", "pandas.Timestamp", "numpy.bool_", "pandas.read_sql" ] ]
tomerwei/Fusion360GalleryDataset
[ "3e1d58920ac56d271c01a7ba05749c5a05097152" ]
[ "tools/regraphnet/src/inference.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport json\nimport time\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom train import *\n\ndef load_graph_pair(path_tar,path_cur,bbox):\n action_type_dict={'CutFeatureOperation':1,'IntersectFeatureOperation':2,'JoinFeatureOperation':0,'NewBodyFeatureOperation':3,'NewComponentFeatureOperation':4}\n operation_names=['JoinFeatureOperation','CutFeatureOperation','IntersectFeatureOperation','NewBodyFeatureOperation','NewComponentFeatureOperation']\n with open(path_tar) as json_data:\n data_tar=json.load(json_data)\n adj_tar,features_tar=format_graph_data(data_tar,bbox)\n if not path_cur:\n adj_cur,features_cur=torch.zeros((0)),torch.zeros((0))\n else:\n with open(path_cur) as json_data:\n data_cur=json.load(json_data)\n adj_cur,features_cur=format_graph_data(data_cur,bbox)\n graph_pair_formatted=[adj_tar,features_tar,adj_cur,features_cur]\n node_names=[x['id'] for x in data_tar['nodes']]\n return graph_pair_formatted,node_names,operation_names\n\ndef inference(graph_pair_formatted,node_names,operation_names,use_gpu=False):\n model.eval()\n num_nodes=graph_pair_formatted[1].size()[0]\n output_end_conditioned=np.zeros((num_nodes,num_nodes))\n with torch.no_grad():\n graph_pair_formatted.append(0)\n output_start,_,output_op=model(graph_pair_formatted,use_gpu=use_gpu)\n output_start=F.softmax(output_start.view(1,-1),dim=1)\n output_op=F.softmax(output_op,dim=1)\n for i in range(num_nodes):\n graph_pair_formatted[4]=i\n _,output_end,_=model(graph_pair_formatted,use_gpu=use_gpu)\n output_end=F.softmax(output_end.view(1,-1),dim=1)\n if use_gpu:\n output_end_conditioned[i,:]=output_end.data.cpu().numpy()\n else:\n output_end_conditioned[i,:]=output_end.data.numpy()\n if use_gpu:\n ps=[output_start.data.cpu().numpy()[0,:],output_end_conditioned,output_op.data.cpu().numpy()[0,:]]\n else:\n ps=[output_start.data.numpy()[0,:],output_end_conditioned,output_op.data.numpy()[0,:]]\n # enumerate all actions\n actions,probs=[],[]\n for i in range(len(node_names)):\n for j in range(len(node_names)):\n for k in range(len(operation_names)):\n actions.append([node_names[i],node_names[j],operation_names[k]])\n probs.append(ps[0][i]*ps[1][i,j]*ps[2][k])\n actions_sorted,probs_sorted=[],[]\n idx=np.argsort(-np.array(probs))\n for i in range(len(probs)):\n actions_sorted.append(actions[idx[i]])\n probs_sorted.append(probs[idx[i]])\n return actions_sorted,probs_sorted\n\nif __name__==\"__main__\":\n # args\n parser=argparse.ArgumentParser()\n parser.add_argument('--no-cuda',action='store_true',default=True,help='Disables CUDA training.')\n parser.add_argument('--dataset',type=str,default='data',help='Dataset name.')\n args=parser.parse_args()\n args.cuda=not args.no_cuda and torch.cuda.is_available()\n # load model\n model=NodePointer(nfeat=708,nhid=256)\n checkpoint_file='../ckpt/model_mpn.ckpt'\n if args.cuda:\n model.load_state_dict(torch.load(checkpoint_file))\n model.cuda()\n else:\n model.load_state_dict(\n torch.load(checkpoint_file, map_location=torch.device(\"cpu\"))\n )\n # inference\n t1=time.time()\n for seq in ['31962_e5291336_0054']:\n # load _sequence.json, as an example\n path_seq='../%s/%s_sequence.json'%(args.dataset,seq)\n if not os.path.isfile(path_seq):\n continue\n with open(path_seq) as json_data:\n data_seq=json.load(json_data)\n path_tar='../%s/%s'%(args.dataset,data_seq['sequence'][-1]['graph'])\n 
bbox=data_seq['properties']['bounding_box']\n for t in range(len(data_seq['sequence'])):\n # load and format graph data from json files\n if t==0:\n path_cur=None\n else:\n path_cur='../%s/%s_%04d.json'%(args.dataset,seq,t-1)\n graph_pair_formatted,node_names,operation_names=load_graph_pair(path_tar,path_cur,bbox)\n if args.cuda:\n for j in range(4):\n graph_pair_formatted[j]=graph_pair_formatted[j].cuda()\n # inference\n actions_sorted,probs_sorted=inference(graph_pair_formatted,node_names,operation_names,use_gpu=args.cuda)\n print(actions_sorted[:10])\n print(probs_sorted[:10])\n t2=time.time()\n print('%.5f seconds.'%(t2-t1))\n" ]
[ [ "torch.nn.functional.softmax", "torch.zeros", "torch.load", "torch.no_grad", "torch.cuda.is_available", "torch.device", "numpy.array", "numpy.zeros" ] ]
mholts1020/Challenge-5-module
[ "d26eaac00b761bfc625e2e15573e26b256a9e843" ]
[ "MCForecastTools.py" ]
[ "# Import libraries and dependencies\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport alpaca_trade_api as tradeapi\r\nimport datetime as dt\r\nimport pytz\r\n\r\nclass MCSimulation:\r\n \"\"\"\r\n A Python class for runnning Monte Carlo simulation on portfolio price data. \r\n \r\n ...\r\n \r\n Attributes\r\n ----------\r\n portfolio_data : pandas.DataFrame\r\n portfolio dataframe\r\n weights: list(float)\r\n portfolio investment breakdown\r\n nSim: int\r\n number of samples in simulation\r\n nTrading: int\r\n number of trading days to simulate\r\n simulated_return : pandas.DataFrame\r\n Simulated data from Monte Carlo\r\n confidence_interval : pandas.Series\r\n the 95% confidence intervals for simulated final cumulative returns\r\n \r\n \"\"\"\r\n \r\n def __init__(self, portfolio_data, weights=\"\", num_simulation=1000, num_trading_days=252):\r\n \"\"\"\r\n Constructs all the necessary attributes for the MCSimulation object.\r\n\r\n Parameters\r\n ----------\r\n portfolio_data: pandas.DataFrame\r\n DataFrame containing stock price information from Alpaca API\r\n weights: list(float)\r\n A list fractions representing percentage of total investment per stock. DEFAULT: Equal distribution\r\n num_simulation: int\r\n Number of simulation samples. DEFAULT: 1000 simulation samples\r\n num_trading_days: int\r\n Number of trading days to simulate. DEFAULT: 252 days (1 year of business days)\r\n \"\"\"\r\n \r\n # Check to make sure that all attributes are set\r\n if not isinstance(portfolio_data, pd.DataFrame):\r\n raise TypeError(\"portfolio_data must be a Pandas DataFrame\")\r\n \r\n # Set weights if empty, otherwise make sure sum of weights equals one.\r\n if weights == \"\":\r\n num_stocks = len(portfolio_data.columns.get_level_values(0).unique())\r\n weights = [1.0/num_stocks for s in range(0,num_stocks)]\r\n else:\r\n if round(sum(weights),2) < .99:\r\n raise AttributeError(\"Sum of portfolio weights must equal one.\")\r\n \r\n # Calculate daily return if not within dataframe\r\n if not \"daily_return\" in portfolio_data.columns.get_level_values(1).unique():\r\n close_df = portfolio_data.xs('close',level=1,axis=1).pct_change()\r\n tickers = portfolio_data.columns.get_level_values(0).unique()\r\n column_names = [(x,\"daily_return\") for x in tickers]\r\n close_df.columns = pd.MultiIndex.from_tuples(column_names)\r\n portfolio_data = portfolio_data.merge(close_df,left_index=True,right_index=True).reindex(columns=tickers,level=0) \r\n \r\n # Set class attributes\r\n self.portfolio_data = portfolio_data\r\n self.weights = weights\r\n self.nSim = num_simulation\r\n self.nTrading = num_trading_days\r\n self.simulated_return = \"\"\r\n \r\n def calc_cumulative_return(self):\r\n \"\"\"\r\n Calculates the cumulative return of a stock over time using a Monte Carlo simulation (Brownian motion with drift).\r\n\r\n \"\"\"\r\n \r\n # Get closing prices of each stock\r\n last_prices = self.portfolio_data.xs('close',level=1,axis=1)[-1:].values.tolist()[0]\r\n \r\n # Calculate the mean and standard deviation of daily returns for each stock\r\n daily_returns = self.portfolio_data.xs('daily_return',level=1,axis=1)\r\n mean_returns = daily_returns.mean().tolist()\r\n std_returns = daily_returns.std().tolist()\r\n \r\n # Initialize empty Dataframe to hold simulated prices\r\n portfolio_cumulative_returns = pd.DataFrame()\r\n \r\n # Run the simulation of projecting stock prices 'nSim' number of times\r\n for n in range(self.nSim):\r\n \r\n if n % 10 == 0:\r\n print(f\"Running Monte Carlo 
simulation number {n}.\")\r\n \r\n # Create a list of lists to contain the simulated values for each stock\r\n simvals = [[p] for p in last_prices]\r\n \r\n # For each stock in our data:\r\n for s in range(len(last_prices)):\r\n\r\n # Simulate the returns for each trading day\r\n for i in range(self.nTrading):\r\n \r\n # Calculate the simulated price using the last price within the list\r\n simvals[s].append(simvals[s][-1] * (1 + np.random.normal(mean_returns[s], std_returns[s])))\r\n \r\n # Calculate the daily returns of simulated prices\r\n sim_df = pd.DataFrame(simvals).T.pct_change()\r\n \r\n # Use the `dot` function with the weights to multiply weights with each column's simulated daily returns\r\n sim_df = sim_df.dot(self.weights)\r\n \r\n # Calculate the normalized, cumulative return series\r\n portfolio_cumulative_returns[n] = (1 + sim_df.fillna(0)).cumprod()\r\n \r\n # Set attribute to use in plotting\r\n self.simulated_return = portfolio_cumulative_returns\r\n \r\n # Calculate 95% confidence intervals for final cumulative returns\r\n self.confidence_interval = portfolio_cumulative_returns.iloc[-1, :].quantile(q=[0.025, 0.975])\r\n \r\n return portfolio_cumulative_returns\r\n \r\n def plot_simulation(self):\r\n \"\"\"\r\n Visualizes the simulated stock trajectories using calc_cumulative_return method.\r\n\r\n \"\"\" \r\n \r\n # Check to make sure that simulation has run previously. \r\n if not isinstance(self.simulated_return,pd.DataFrame):\r\n self.calc_cumulative_return()\r\n \r\n # Use Pandas plot function to plot the return data\r\n plot_title = f\"{self.nSim} Simulations of Cumulative Portfolio Return Trajectories Over the Next {self.nTrading} Trading Days.\"\r\n return self.simulated_return.plot(legend=None,title=plot_title)\r\n \r\n def plot_distribution(self):\r\n \"\"\"\r\n Visualizes the distribution of cumulative returns simulated using calc_cumulative_return method.\r\n\r\n \"\"\"\r\n \r\n # Check to make sure that simulation has run previously. \r\n if not isinstance(self.simulated_return,pd.DataFrame):\r\n self.calc_cumulative_return()\r\n \r\n # Use the `plot` function to create a probability distribution histogram of simulated ending prices\r\n # with markings for a 95% confidence interval\r\n plot_title = f\"Distribution of Final Cumuluative Returns Across All {self.nSim} Simulations\"\r\n plt = self.simulated_return.iloc[-1, :].plot(kind='hist', bins=10,density=True,title=plot_title)\r\n plt.axvline(self.confidence_interval.iloc[0], color='r')\r\n plt.axvline(self.confidence_interval.iloc[1], color='r')\r\n return plt\r\n \r\n def summarize_cumulative_return(self):\r\n \"\"\"\r\n Calculate final summary statistics for Monte Carlo simulated stock data.\r\n \r\n \"\"\"\r\n \r\n # Check to make sure that simulation has run previously. \r\n if not isinstance(self.simulated_return,pd.DataFrame):\r\n self.calc_cumulative_return()\r\n \r\n metrics = self.simulated_return.iloc[-1].describe()\r\n ci_series = self.confidence_interval\r\n ci_series.index = [\"95% CI Lower\",\"95% CI Upper\"]\r\n return metrics.append(ci_series)" ]
[ [ "numpy.random.normal", "pandas.MultiIndex.from_tuples", "pandas.DataFrame" ] ]
jiangyy12/application-tracking-system
[ "6de2d98351df65d43c09a5739c3c3dbdf3bf78d3" ]
[ "backend/app.py" ]
[ "#importing required python libraries\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS, cross_origin\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom itertools import islice\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom data.connection import query, insert, count, querySchool, countProgram, queryItem, query_groupByCompany\nimport pandas as pd\nimport json\nimport os\nimport csv\nfrom flask_jwt_extended import create_access_token\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_jwt_extended import jwt_required\nfrom flask_jwt_extended import JWTManager\napp = Flask(__name__)\napp.config[\"JWT_SECRET_KEY\"] = os.environ.get('JWT_SECRET') # Change this!\njwt = JWTManager(app)\n# make flask support CORS\nCORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\n# testing API, you can try to access http://localhost:5000/ on your browser after starting the server\n# params:\n# -name: string\[email protected](\"/\")\n@cross_origin()\ndef hello():\n name = request.args.get('name') if request.args.get('name') else ''\n obj = {\n \"str\": \"Hello World!\"+name\n }\n return jsonify(obj)\n\n# saerch function\n# params:\n# -keywords: string\[email protected](\"/search\")\ndef search():\n keywords = request.args.get('keywords')\n keywords = keywords.replace(' ', '+')\n\n # create a url for a crawler to fetch job information\n url = \"https://www.google.com/search?q=\" + keywords + \"&ibp=htl;jobs\"\n\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(url)\n content = driver.page_source\n driver.close()\n soup = BeautifulSoup(content)\n\n # parsing searching results to DataFrame and return\n df = pd.DataFrame(columns=[\"jobTitle\", \"companyName\", \"location\"])\n mydivs = soup.find_all(\"div\", {\"class\": \"PwjeAc\"})\n for i, div in enumerate(mydivs):\n df.at[i, \"jobTitle\"] = div.find(\"div\", {\"class\": \"BjJfJf PUpOsf\"}).text\n df.at[i, \"companyName\"] = div.find(\"div\", {\"class\": \"vNEEBe\"}).text\n df.at[i, \"location\"] = div.find(\"div\", {\"class\": \"Qk80Jf\"}).text\n return jsonify(df.to_dict('records'))\n\n# get data from the CSV file for rendering root page\[email protected](\"/application\", methods=['GET'])\ndef getDataFromCSV():\n try:\n results = query()\n result = []\n for row in results:\n if (len(row) == 5):\n dic = {}\n dic['jobTitle'] = row[0]\n dic['companyName'] = row[1]\n dic['date'] = row[2].strftime(\"%Y-%m-%d\")\n dic['class'] = str(row[3])\n dic['id'] = str(row[4])\n result.append(dic)\n \n json_str = json.dumps(result)\n return json_str\n except Exception as e: \n print(e)\n exit(1)\n\[email protected](\"/applicationSummaryPage\", methods=['GET'])\ndef getCompanySummaryPage():\n try:\n results = query_groupByCompany()\n result = []\n for row in results:\n if (len(row) == 4):\n dic = {}\n dic['companyName'] = row[0]\n dic['Waiting'] = row[1]\n dic['Offer'] = row[2]\n dic['Rejected'] = row[3]\n result.append(dic)\n\n # json_str = json.dumps(result)\n json_str = jsonify(result)\n return json_str\n except Exception as e:\n print(e)\n exit(1)\n\n# write a new record to the CSV file \[email protected](\"/application\", methods=['POST'])\ndef editcsv():\n # todo: imply database\n csvTitle = ['jobTitle', 'companyName', 'date', 'class', 'id']\n tables = ['application', 'job']\n application = request.get_json()['application']\n data = {}\n for t in csvTitle:\n if (t is 'jobTitle'):\n data['jobName'] = application[t]\n if (t is 'companyName'):\n data['jobCompany'] = 
application[t]\n if (t is 'date'):\n data['jobReleaseDate'] = application[t]\n data['updateTime'] = application[t]\n if (t is 'class'):\n data['applyStatus'] = application[t]\n data['jobClass'] = application[t]\n if (t is 'id'):\n data['jobId'] = application[t]\n # newLine.append(application[t] if t in application else None)\n\n try:\n for table in tables:\n insert(table, data)\n\n except Exception as e: \n print(e)\n exit(1)\n return jsonify('Create an application succeddfully!')\n\[email protected](\"/school\", methods=['GET'])\ndef getDataFromDB():\n try:\n results = querySchool()\n result = []\n for row in results:\n if (len(row) == 5):\n dic = {}\n dic['programTitle'] = row[0]\n dic['schoolName'] = row[1]\n dic['date'] = row[2].strftime(\"%Y-%m-%d\")\n dic['class'] = str(row[3])\n dic['id'] = str(row[4])\n result.append(dic)\n\n json_str = json.dumps(result)\n return json_str\n except Exception as e:\n print(e)\n exit(1)\n\n# write a new record to the CSV file\[email protected](\"/school\", methods=['POST'])\ndef editDB():\n # todo: imply database\n # path = \"./data/applications.csv\"\n csvTitle = ['programTitle', 'schoolName', 'date', 'class', 'id']\n tables = ['school', 'program']\n application = request.get_json()['school']\n data = {}\n for t in csvTitle:\n if (t is 'programTitle'):\n data['programName'] = application[t]\n if (t is 'schoolName'):\n data['programSchool'] = application[t]\n if (t is 'date'):\n data['jobReleaseDate'] = application[t]\n data['updateTime'] = application[t]\n if (t is 'class'):\n data['applyStatus'] = application[t]\n data['jobClass'] = application[t]\n if (t is 'id'):\n data['programId'] = application[t]\n # newLine.append(application[t] if t in application else None)\n\n try:\n for table in tables:\n insert(table, data)\n\n except Exception as e:\n print(e)\n exit(1)\n return jsonify('Create an school application succeddfully!')\n\[email protected](\"/note\", methods=['GET'])\ndef getDataFromDB2():\n try:\n results = queryItem()\n result = []\n for row in results:\n if (len(row) == 5):\n dic = {}\n dic['jobName'] = row[0]\n dic['jobCompany'] = row[1]\n dic['commentTime'] = row[2].strftime(\"%Y-%m-%d\")\n dic['class'] = str(row[3])\n dic['id'] = str(row[4])\n result.append(dic)\n\n json_str = json.dumps(result)\n return json_str\n except Exception as e:\n print(e)\n exit(1)\n\n# get the biggest id in the CSV for creating a new application\[email protected](\"/getNewId\", methods=['GET'])\ndef getNewId():\n try:\n i = count() + 1\n return jsonify(i)\n except Exception as e: \n print(e)\n exit(1)\n\[email protected](\"/getNewProgramId\", methods=['GET'])\ndef getNewProgramId():\n path = \"./data/applications.csv\"\n try:\n i = countProgram() + 1\n return jsonify(i)\n except Exception as e:\n print(e)\n exit(1)\n\[email protected](\"/token\", methods=['POST'])\ndef create_token():\n # data = json.loads(request.get_data())\n # print(data)\n email = request.json.get(\"email\", None)\n password = request.json.get(\"password\", None)\n if email != \"test\" or password != \"test\":\n return jsonify({\"msg\": \"Bad username or password\"}), 401\n access_token = create_access_token(identity=email)\n return jsonify(access_token=access_token)\n\nif __name__ == \"__main__\":\n app.run()\n" ]
[ [ "pandas.DataFrame" ] ]
TuxStory/LCsvGraph
[ "61abca24692a2230597a3736c677322adb262e74" ]
[ "LottoCSVGraph.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport csv, statistics\n\ndef DataList():\n path =\"./LottoGameData.csv\"\n file = open(path,newline='')\n reader = csv.reader(file)\n header = next(reader) #first line is the reader\n data = []\n for row in reader:\n # row = [date, n1, n2, n3, n4, n5, n6 ,n7]\n num = list(map(int,row[1:8])) #map applique la fontion int à tout le tableau qui était String\n data.append(num)\n return data\n\ndef Graphique(chiffre,counts):\n #y_pos = np.arange(len(counts))\n plt.subplot(211)\n bar = plt.bar(range(len(chiffre)), counts, align='center', alpha=0.5,color='g')\n plt.xticks(range(len(counts)),chiffre)\n plt.title(\"Lotto Stats by Number\")\n plt.grid()\n\ndef Graphique2(x,y):\n plt.subplot(212)\n plt.title(\"Lotto Stats by Counts\")\n bar = plt.bar(range(len(x)), y, align='center', alpha=0.5)\n plt.xticks(range(len(y)),x)\n plt.grid()\n\ndef Statistique(Tirage,Tab):\n print(\"Statistique\".center(25,\"*\"))\n print(\"Nombre de Tirages :\", Tirage)\n chiffre, counts = zip(*Tab)\n print(\"Nombre moyen de tirage d'un chiffre:\", statistics.mean(counts))\n print(\"Rang 1 : 1/8.145.060\")\n for i in range(len(Tab)):\n print(\"Chiffre:\",Tab[i][0],\" - Nombre de Tirage\",Tab[i][1], Tab[i][1]/Tirage*100,\"%\")\n\ndef main():\n Stat = DataList()\n #print(Stat)\n chiffre, counts = np.unique(Stat, return_counts=True)\n c = list(zip(chiffre, counts))\n print(c)\n tri = sorted(c, key=lambda c: c[1])\n print(\"TRI\".center(25,\"*\"))\n print(tri)\n x, y = zip(*tri)\n tirages = len(Stat)\n Statistique(tirages,c)\n Graphique(chiffre,counts)\n Graphique2(x,y)\n plt.show()\n\nif __name__==\"__main__\":\n main()\n\n\n" ]
[ [ "matplotlib.pyplot.title", "numpy.unique", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.show" ] ]
Haichao-Zhang/leap
[ "4d75961ff2ff203d4412633cbeb12889de3c79b6" ]
[ "railrl/torch/pytorch_util.py" ]
[ "import torch\nimport numpy as np\nfrom torch.autograd import Variable as TorchVariable\nfrom torch.nn import functional as F\n\n\ndef soft_update_from_to(source, target, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )\n\n\ndef copy_model_params_from_to(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)\n\n\ndef maximum_2d(t1, t2):\n # noinspection PyArgumentList\n return torch.max(\n torch.cat((t1.unsqueeze(2), t2.unsqueeze(2)), dim=2),\n dim=2,\n )[0].squeeze(2)\n\n\ndef kronecker_product(t1, t2):\n \"\"\"\n Computes the Kronecker product between two tensors\n See https://en.wikipedia.org/wiki/Kronecker_product\n \"\"\"\n t1_height, t1_width = t1.size()\n t2_height, t2_width = t2.size()\n out_height = t1_height * t2_height\n out_width = t1_width * t2_width\n\n # TODO(vitchyr): see if you can use expand instead of repeat\n tiled_t2 = t2.repeat(t1_height, t1_width)\n expanded_t1 = (\n t1.unsqueeze(2)\n .unsqueeze(3)\n .repeat(1, t2_height, t2_width, 1)\n .view(out_height, out_width)\n )\n\n return expanded_t1 * tiled_t2\n\n\ndef selu(\n x,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n):\n \"\"\"\n Based on https://github.com/dannysdeng/selu/blob/master/selu.py\n \"\"\"\n return scale * (\n F.relu(x) + alpha * (F.elu(-1 * F.relu(-1 * x)))\n )\n\n\ndef softplus(x):\n \"\"\"\n PyTorch's softplus isn't (easily) serializable.\n \"\"\"\n return F.softplus(x)\n\n\ndef alpha_dropout(\n x,\n p=0.05,\n alpha=-1.7580993408473766,\n fixedPointMean=0,\n fixedPointVar=1,\n training=False,\n):\n keep_prob = 1 - p\n if keep_prob == 1 or not training:\n return x\n a = np.sqrt(fixedPointVar / (keep_prob * (\n (1 - keep_prob) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))\n b = fixedPointMean - a * (\n keep_prob * fixedPointMean + (1 - keep_prob) * alpha)\n keep_prob = 1 - p\n\n random_tensor = keep_prob + torch.rand(x.size())\n binary_tensor = Variable(torch.floor(random_tensor))\n x = x.mul(binary_tensor)\n ret = x + alpha * (1 - binary_tensor)\n ret.mul_(a).add_(b)\n return ret\n\n\ndef alpha_selu(x, training=False):\n return alpha_dropout(selu(x), training=training)\n\n\ndef double_moments(x, y):\n \"\"\"\n Returns the first two moments between x and y.\n\n Specifically, for each vector x_i and y_i in x and y, compute their\n outer-product. Flatten this resulting matrix and return it.\n\n The first moments (i.e. 
x_i and y_i) are included by appending a `1` to x_i\n and y_i before taking the outer product.\n :param x: Shape [batch_size, feature_x_dim]\n :param y: Shape [batch_size, feature_y_dim]\n :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)\n \"\"\"\n batch_size, x_dim = x.size()\n _, y_dim = x.size()\n x = torch.cat((x, Variable(torch.ones(batch_size, 1))), dim=1)\n y = torch.cat((y, Variable(torch.ones(batch_size, 1))), dim=1)\n x_dim += 1\n y_dim += 1\n x = x.unsqueeze(2)\n y = y.unsqueeze(1)\n\n outer_prod = (\n x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim, y_dim)\n )\n return outer_prod.view(batch_size, -1)\n\n\ndef batch_diag(diag_values, diag_mask=None):\n batch_size, dim = diag_values.size()\n if diag_mask is None:\n diag_mask = torch.diag(torch.ones(dim))\n batch_diag_mask = diag_mask.unsqueeze(0).expand(batch_size, dim, dim)\n batch_diag_values = diag_values.unsqueeze(1).expand(batch_size, dim, dim)\n return batch_diag_values * batch_diag_mask\n\n\ndef batch_square_vector(vector, M):\n \"\"\"\n Compute x^T M x\n \"\"\"\n vector = vector.unsqueeze(2)\n return torch.bmm(torch.bmm(vector.transpose(2, 1), M), vector).squeeze(2)\n\n\ndef fanin_init(tensor):\n if isinstance(tensor, TorchVariable):\n return fanin_init(tensor.data)\n size = tensor.size()\n if len(size) == 2:\n fan_in = size[0]\n elif len(size) > 2:\n fan_in = np.prod(size[1:])\n else:\n raise Exception(\"Shape must be have dimension at least 2.\")\n bound = 1. / np.sqrt(fan_in)\n return tensor.uniform_(-bound, bound)\n\n\ndef fanin_init_weights_like(tensor):\n if isinstance(tensor, TorchVariable):\n return fanin_init(tensor.data)\n size = tensor.size()\n if len(size) == 2:\n fan_in = size[0]\n elif len(size) > 2:\n fan_in = np.prod(size[1:])\n else:\n raise Exception(\"Shape must be have dimension at least 2.\")\n bound = 1. 
/ np.sqrt(fan_in)\n new_tensor = FloatTensor(tensor.size())\n new_tensor.uniform_(-bound, bound)\n return new_tensor\n\n\ndef almost_identity_weights_like(tensor):\n \"\"\"\n Set W = I + lambda * Gaussian no\n :param tensor:\n :return:\n \"\"\"\n shape = tensor.size()\n init_value = np.eye(*shape)\n init_value += 0.01 * np.random.rand(*shape)\n return FloatTensor(init_value)\n\n\ndef clip1(x):\n return torch.clamp(x, -1, 1)\n\ndef compute_conv_output_size(h_in, w_in, kernel_size, stride,padding=0):\n h_out = (h_in + 2 * padding - (kernel_size-1) - 1)/stride + 1\n w_out = (w_in + 2 * padding - (kernel_size-1) - 1)/stride + 1\n return int(np.floor(h_out)), int(np.floor(w_out))\n\ndef compute_deconv_output_size(h_in, w_in, kernel_size, stride, padding=0):\n h_out = (h_in -1)*stride - 2*padding + kernel_size\n w_out = (w_in -1)*stride - 2*padding + kernel_size\n return int(np.floor(h_out)), int(np.floor(w_out))\n\ndef compute_conv_layer_sizes(h_in, w_in, kernel_sizes, strides, pool_sizes, paddings=None):\n if paddings==None:\n for kernel, stride, pool in zip(kernel_sizes, strides, pool_sizes):\n h_in, w_in = compute_conv_output_size(h_in, w_in, kernel, stride)\n h_in, w_in = compute_conv_output_size(h_in, w_in, pool, pool)\n print('Output Size:', (h_in, w_in))\n else:\n for kernel, stride, pool, padding in zip(kernel_sizes, strides, pool_sizes, paddings):\n h_in, w_in = compute_conv_output_size(h_in, w_in, kernel, stride, padding=padding)\n h_in, w_in = compute_conv_output_size(h_in, w_in, pool, pool)\n print('Output Size:', (h_in, w_in))\n\ndef compute_deconv_layer_sizes(h_in, w_in, kernel_sizes, strides, paddings=None):\n if paddings==None:\n for kernel, stride in zip(kernel_sizes, strides):\n h_in, w_in = compute_deconv_output_size(h_in, w_in, kernel, stride)\n print('Output Size:', (h_in, w_in))\n else:\n for kernel, stride, padding in zip(kernel_sizes, strides, paddings):\n h_in, w_in = compute_deconv_output_size(h_in, w_in, kernel, stride, padding=padding)\n print('Output Size:', (h_in, w_in))\n\n\n\"\"\"\nGPU wrappers\n\"\"\"\n_use_gpu = False\n\n\ndef set_gpu_mode(mode):\n global _use_gpu\n _use_gpu = mode\n\n\ndef gpu_enabled():\n return _use_gpu\n\n\ndef set_device(gpu_id):\n torch.cuda.set_device(gpu_id)\n\n\n# noinspection PyPep8Naming\ndef FloatTensor(*args, **kwargs):\n if _use_gpu:\n return torch.cuda.FloatTensor(*args, **kwargs)\n else:\n # noinspection PyArgumentList\n return torch.FloatTensor(*args, **kwargs)\n\n\ndef Variable(tensor, **kwargs):\n if _use_gpu and not tensor.is_cuda:\n return TorchVariable(tensor.cuda(), **kwargs)\n else:\n return TorchVariable(tensor, **kwargs)\n\n\ndef from_numpy(*args, **kwargs):\n if _use_gpu:\n return torch.from_numpy(*args, **kwargs).float().cuda()\n else:\n return torch.from_numpy(*args, **kwargs).float()\n\ndef from_numpy_double(*args, **kwargs):\n if _use_gpu:\n return torch.from_numpy(*args, **kwargs).double().cuda()\n else:\n return torch.from_numpy(*args, **kwargs).double()\n\n\ndef get_numpy(tensor):\n if isinstance(tensor, TorchVariable):\n return get_numpy(tensor.data)\n if _use_gpu:\n return tensor.cpu().numpy()\n return tensor.numpy()\n\n\ndef np_to_var(np_array, double=False, **kwargs):\n if np_array is None:\n return None\n\n if double:\n return Variable(from_numpy_double(np_array), **kwargs)\n else:\n return Variable(from_numpy(np_array), **kwargs)\n\n\ndef zeros(*sizes, out=None):\n tensor = torch.zeros(*sizes, out=out)\n if _use_gpu:\n tensor = tensor.cuda()\n return tensor\n\n\ndef ones(*sizes, out=None):\n tensor = 
torch.ones(*sizes, out=out)\n if _use_gpu:\n tensor = tensor.cuda()\n return tensor\n" ]
[ [ "torch.ones", "torch.floor", "torch.cuda.set_device", "torch.zeros", "numpy.sqrt", "numpy.eye", "torch.from_numpy", "torch.cuda.FloatTensor", "torch.nn.functional.relu", "torch.FloatTensor", "numpy.random.rand", "numpy.prod", "numpy.floor", "torch.clamp", "torch.nn.functional.softplus", "torch.autograd.Variable" ] ]
daniele21/Financial_Sentiment_Analysis
[ "3734733f2d1d291c81a6239de121edcce861b463" ]
[ "tests/network_test.py" ]
[ "import unittest\nimport logging\nimport torch\nfrom torch import nn\n\nfrom constants.config import MOVIE_DATASET, TOKENIZER, MAX_WORD_SENTENCE, SST_DATASET\nfrom scripts.datasets.dataloader import generate_dataloader\nfrom scripts.datasets.dataset import NN_Dataset\nfrom scripts.datasets.sst_dataset import Bert_NN_Dataset\nfrom scripts.models.nn_conv_model import ConvModel\nfrom scripts.models.nn_model import NetworkModel\nfrom scripts.models.nn_pretrained_model import Pretrained_Bert_Model\nfrom scripts.networks.conv_lstm_network import Conv1D_Network\nfrom scripts.networks.pretrained_bert import Pretrained_Bert_Network\nfrom scripts.pipelines.preprocessing_pipeline import preprocessing_pipeline\nfrom transformers import AdamW\n\n\nclass NetworkTest(unittest.TestCase):\n logger = logging.getLogger(__name__)\n logging.basicConfig(format='\\n%(asctime)s %(module)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n\n def test_conv_net(self):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n seed = 2021\n\n emb_dim = 100\n\n dataset_params = {'data_path': 'resources/preprocessed_data/cleaned_data_v1.csv',\n 'dataset_type': MOVIE_DATASET,\n 'preprocessed': True,\n # 'target_scaling': (0, 1),\n 'vectorization': TOKENIZER,\n # 'vector_params': {'ngram_range': (1, 3),\n # 'max_features': emb_dim},\n 'imbalance': None,\n # 'imb_params': {'random_state': seed,\n # 'k_neighbors': 3},\n 'train': True}\n\n dataloader_params = {'split_size': 0.7,\n 'shuffle': True,\n 'batch_size': 32,\n 'random_seed': seed}\n\n network_params = {'emb_dim': emb_dim,\n 'dataset_type': TOKENIZER,\n 'kernel_size': [3, 5, 7],\n 'out_channels': 10,\n 'stride': 1,\n 'padding': [0, 1, 2],\n 'pooling_kernel': 2,\n 'dropout': 0.4}\n\n training_params = {'epochs': 1,\n 'lr': 0.001}\n\n x, y, dataset, vectorizer = preprocessing_pipeline(dataset_params)\n network_params['n_words'] = vectorizer.get_n_words()\n\n dataloader = generate_dataloader(x, y, dataloader_params)\n network = Conv1D_Network(network_params)\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam\n\n model = NetworkModel(network, dataloader, loss, optimizer)\n model._init_optimizer(training_params['lr'])\n\n model.train(training_params['epochs'])\n\n def test_train_model(self):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n seed = 2021\n\n emb_dim = 100\n\n dataset_params = {'data_path': 'resources/preprocessed_data/cleaned_data_v1.csv',\n 'dataset_type': MOVIE_DATASET,\n 'preprocessed': True,\n # 'target_scaling': (0, 1),\n 'vectorization': TOKENIZER,\n # 'vector_params': {'ngram_range': (1, 3),\n # 'max_features': emb_dim},\n 'imbalance': None,\n # 'imb_params': {'random_state': seed,\n # 'k_neighbors': 3},\n 'train': True}\n\n dataloader_params = {'split_size': 0.7,\n 'shuffle': True,\n 'batch_size': 64,\n 'random_seed': seed,\n 'dataset_class': NN_Dataset}\n\n network_params = {'emb_dim': MAX_WORD_SENTENCE,\n 'dataset_type': TOKENIZER,\n 'kernel_size': [5, 7, 9],\n 'out_channels': 10,\n 'stride': 1,\n 'padding': [0, 1, 2],\n 'pooling_kernel': 2,\n 'dropout': 0.2,\n 'device': torch.device('cpu:0')}\n\n training_params = {'epochs': 10,\n 'lr': 0.001,\n 'save_dir': 'resources/models/',\n 'patience': 5}\n\n x, y, dataset, vectorizer = preprocessing_pipeline(dataset_params)\n network_params['n_words'] = vectorizer.get_n_words()\n\n dataloader = generate_dataloader(x, y, dataloader_params)\n network = Conv1D_Network(network_params).to(network_params['device'])\n loss = 
nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam\n\n model = ConvModel(network, dataloader, loss, optimizer, training_params['save_dir'])\n model._init_optimizer(training_params['lr'])\n\n model.train(training_params['epochs'], patience=training_params['patience'])\n\n\n def test_bert_model(self):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n seed = 2021\n\n dataset_params = {'data_path': None,\n 'dataset_type': SST_DATASET,\n 'preprocessed': False,\n 'vectorization': None,\n 'imbalance': None,\n 'train': True}\n\n dataloader_params = {'split_size': 0.7,\n 'shuffle': True,\n 'batch_size': 64,\n 'random_seed': seed}\n\n network_params = {'dropout': 0.2,\n 'device': torch.device('cpu:0')}\n\n training_params = {'epochs': 10,\n 'lr': 0.001,\n 'save_dir': 'resources/models/',\n 'patience': 5}\n\n x, y, dataset, vectorizer = preprocessing_pipeline(dataset_params)\n\n dataloader = generate_dataloader(x, y, dataloader_params, Bert_NN_Dataset)\n network = Pretrained_Bert_Network(network_params).to(network_params['device'])\n loss = nn.BCELoss()\n optimizer = AdamW\n\n model = Pretrained_Bert_Model(network, dataloader, loss, optimizer, training_params['save_dir'], network_params['device'])\n model._init_optimizer(training_params['lr'])\n\n model.train(training_params['epochs'], patience=training_params['patience'])\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.device", "torch.nn.CrossEntropyLoss", "torch.nn.BCELoss" ] ]
HaiNguyen2903/Faster-RCNN
[ "78d249a07dae0c9dad095543dab6385394c8dc0a" ]
[ "data/dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nimport torch as t\nfrom data.voc_dataset import VOCBboxDataset\nfrom skimage import transform as sktsf\n# import skimage.transform as sktsf\nfrom torchvision import transforms as tvtsf\nfrom data import util\nimport numpy as np\nfrom utils.config import opt\n\n\ndef inverse_normalize(img):\n if opt.caffe_pretrain:\n img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))\n return img[::-1, :, :]\n # approximate un-normalize for visualize\n return (img * 0.225 + 0.45).clip(min=0, max=1) * 255\n\n\ndef pytorch_normalze(img):\n \"\"\"\n https://github.com/pytorch/vision/issues/223\n return appr -1~1 RGB\n \"\"\"\n normalize = tvtsf.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n img = normalize(t.from_numpy(img))\n return img.numpy()\n\n\ndef caffe_normalize(img):\n \"\"\"\n return appr -125-125 BGR\n \"\"\"\n img = img[[2, 1, 0], :, :] # RGB-BGR\n img = img * 255\n mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)\n img = (img - mean).astype(np.float32, copy=True)\n return img\n\n\ndef preprocess(img, min_size=600, max_size=1000):\n \"\"\"Preprocess an image for feature extraction.\n\n The length of the shorter edge is scaled to :obj:`self.min_size`.\n After the scaling, if the length of the longer edge is longer than\n :param min_size:\n :obj:`self.max_size`, the image is scaled to fit the longer edge\n to :obj:`self.max_size`.\n\n After resizing the image, the image is subtracted by a mean image value\n :obj:`self.mean`.\n\n Args:\n img (~numpy.ndarray): An image. This is in CHW and RGB format.\n The range of its value is :math:`[0, 255]`.\n\n Returns:\n ~numpy.ndarray: A preprocessed image.\n\n \"\"\"\n C, H, W = img.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n img = img / 255.\n img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect',anti_aliasing=False)\n # both the longer and shorter should be less than\n # max_size and min_size\n if opt.caffe_pretrain:\n normalize = caffe_normalize\n else:\n normalize = pytorch_normalze\n return normalize(img)\n\n\nclass Transform(object):\n\n def __init__(self, min_size=600, max_size=1000):\n self.min_size = min_size\n self.max_size = max_size\n\n def __call__(self, in_data):\n img, bbox, label = in_data\n _, H, W = img.shape\n img = preprocess(img, self.min_size, self.max_size)\n _, o_H, o_W = img.shape\n scale = o_H / H\n bbox = util.resize_bbox(bbox, (H, W), (o_H, o_W))\n\n # horizontally flip\n img, params = util.random_flip(\n img, x_random=True, return_param=True)\n bbox = util.flip_bbox(\n bbox, (o_H, o_W), x_flip=params['x_flip'])\n\n return img, bbox, label, scale\n\n\nclass Dataset:\n def __init__(self, opt):\n self.opt = opt\n self.db = VOCBboxDataset(opt.voc_data_dir)\n self.tsf = Transform(opt.min_size, opt.max_size)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult = self.db.get_example(idx)\n\n img, bbox, label, scale = self.tsf((ori_img, bbox, label))\n # TODO: check whose stride is negative to fix this instead copy all\n # some of the strides of a given numpy array are negative.\n return img.copy(), bbox.copy(), label.copy(), scale\n\n def __len__(self):\n return len(self.db)\n\n\nclass TestDataset:\n def __init__(self, opt, split='test', use_difficult=True):\n self.opt = opt\n self.db = VOCBboxDataset(opt.voc_data_dir, split=split, use_difficult=use_difficult)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult 
= self.db.get_example(idx)\n img = preprocess(ori_img)\n return img, ori_img.shape[1:], bbox, label, difficult\n\n def __len__(self):\n return len(self.db)\n" ]
[ [ "numpy.array", "torch.from_numpy" ] ]
turoger/df_Collate
[ "4c4266fc7074957ca44d5085e915ea6c47123193" ]
[ "dfCollate/dfCollate.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nimport dask.dataframe as ddf\nimport pandas as pd\nimport numpy as np\n\n\n#\ndef big_ddf (directory_to_traits_table,\n directory_to_phenotypes, phenotype_list, filename_type,\n directory_to_MAF,\n gene_name, directory_to_rsid):\n '''\n Returns a big dask df with: 'SNP', 'ALLELE', 'iScore', 'BETA', 'PV', 'MAF', 'Gene' as headers\n '''\n\n trait_dict = ukbbTT_to_dict(directory_to_traits_table)\n pheno_dict = reader(directory_to_phenotypes, phenotype_list, filename_type, trait_dict)\n a_ddf = ddf_collapser(pheno_dict)\n a_ddf = ddf_MAF_Add(directory_to_MAF, a_ddf)\n a_ddf = ddf_Gene_Add(a_ddf, gene_name, directory_to_rsid)\n a_ddf = a_ddf.drop(columns = ['NSE'])\n\n return(a_ddf)\n\n\n#\ndef ukbbTT_to_dict (file_path):\n '''\n Takes a UKBB trait table file path, maps key's to Descriptions and returns a dictionary mapping\n '''\n\n traits_tb = pd.read_csv(file_path, sep = ',') # read in path of file\n\n key = traits_tb['key'].tolist()\n val = traits_tb['Description'].tolist()\n key_des_dict = {key[i]:val[i] for i in range(len(key))} # creates dictionary\n\n key_des_dict.update({'21001-0.0':'BMI (kg/m2)',\n '23104-0.0':'BMI Impedence (kg/m2)',\n '21002-0.0':'Weight(kg)',\n '23098-0.0':'Weight Impedence(kg)'}) # update dict vals with correct descriptors\n return(key_des_dict)\n\n\n# \ndef reader (directory, phenoID_list, filename, trait_dict):\n '''\n Takes a directory of phenotype folders and iterates through the folder's ID in phenoID_list. \n Inside each folder are potentially four different filename: `imputed.all`, `imputed.norm`, `geno.all`, and `geno.norm`.\n\n Parameters: \n directory: the path that holds all the phenotype folders\n phenoID_list: list of phenotype Id's you're interested in collating into a dataframe\n filename: which subset of imputed/genotyped and non-norm/normed files you want to collate\n trait_dict: a dictionary of ID's to Phenotype name mappings\n\n returns a dictionary based w/ phenoID_list as the key, and the imported dataframe as the value.\n '''\n\n pheno_ddf_ls = dict()\n os.chdir(directory)\n\n for i in phenoID_list:\n os.chdir(i)\n\n for fn in os.listdir():\n if fn.startswith(filename):\n val = ddf.read_csv(fn, sep = ' ',\n header = 0, # Ignore initial Headers\n names = ['SNP', 'ALLELE', 'iScore', 'BETA', 'NSE', 'PV'], # Specifies Headers\n dtype = {'SNP': object,\n 'ALLELE': object,\n 'iScore': np.float32,\n 'BETA': np.float32,\n 'NSE': np.float32,\n 'PV': np.float32}) # Change variable type to save memory\n val['Phenotype'] = trait_dict[i] # Creates new col with name of df as val\n pheno_ddf_ls.update({i:val})\n\n os.chdir('..')\n\n return(pheno_ddf_ls)\n\n\n#\ndef ddf_collapser(dict_df):\n '''\n Takes dict of ddf and collapses dict into a large ddf. Returns a big dask df\n '''\n\n frames = []\n for df_name in dict_df.keys():\n frames.append(dict_df[df_name]) #add each df to a list\n\n return(ddf.concat(frames))\n\n\n#\ndef ddf_MAF_Add(file_path, a_ddf):\n '''\n Takes a UKBB MAF txt file for the specified chromosome and cerates a SNP to MAF dict mapping\n Takes a dask df. 
Creates a new column, MAF, based on k:v pair in snp_dict for ddf in dict\n '''\n\n mfi_header = ['Loc','SNP', 'Position', 'Allele1', 'Allele2', 'MAF', 'MA', 'Info_score'] # Assigns header to csv\n mfi = pd.read_csv(file_path, names = mfi_header, sep = \"\\t\") # Reads tab sep file with names from mfi_header\n\n key = mfi['SNP'].tolist()\n val = mfi['MAF'].tolist()\n snp_maf_dict = {key[i]:val[i] for i in range(len(key))} # Creates dict mapping for Variant and MAF vals \n\n\n a_ddf['MAF'] = a_ddf['SNP'].map(snp_maf_dict) # Creates new col with MAF mapped to SNP\n a_ddf = (a_ddf\n .assign(MAF = lambda df: df['MAF'].astype(np.float32)) # Changes df data type for 'MAF'\n )\n return(a_ddf)\n\n\n# \ndef ddf_Gene_Add(a_ddf, gene, rsid_file_path):\n ''' \n Takes a dask df and sorts by the SNV found in rsid_file_path\n\n gene: Should be a string to name the values of ddf if the rsid lies within the gene\n rsid_file_path: Should be a directory to a text file with all the variants within a gene\n\n returns a dask df with in `Gene` or `not Gene`\n '''\n\n gene_rsid = open(rsid_file_path, 'r') # read in text file\n gene_rsid_ls = gene_rsid.readlines() # Add each line as index to a list\n\n gene_rsid_ls2 = [] # Strip newline escape, append to list\n for index in range(1, len(gene_rsid_ls)):\n rsid = gene_rsid_ls[index]\n gene_rsid_ls2.append(rsid.rstrip('\\n'))\n\n a_ddf['Gene'] = a_ddf['SNP'].isin(gene_rsid_ls2) # creates a col of T/F depending if in gene_rsid_ls2\n booleanDict = {True: gene, False: 'not_' + gene} # create a dict mapping T/F to a name\n a_ddf['Gene'] = a_ddf['Gene'].map(booleanDict) # change col vals based on mapping\n\n return(a_ddf)\n\n" ]
[ [ "pandas.read_csv" ] ]
tomiyee/6.806-Card-Translation
[ "9286b232f60ec53fabbbcbeca23ddd71cd6c3483" ]
[ "code/model.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass Card2CodeModel(nn.Module):\n def __init__(self):\n self.linear = nn.Linear(5,5)\n \n def forward(self, inputs=None):\n return self.linear(inputs)\n" ]
[ [ "torch.nn.Linear" ] ]
seungjooshin/kornia
[ "5bfb8f2f23cc1d99647c36aa982132c5b7adbb6d" ]
[ "test/geometry/calibration/test_undistort.py" ]
[ "import pytest\nimport torch\nfrom torch.autograd import gradcheck\n\nfrom kornia.geometry.calibration.undistort import undistort_points\nfrom kornia.testing import assert_close\n\n\nclass TestUndistortion:\n def test_smoke(self, device, dtype):\n points = torch.rand(1, 2, device=device, dtype=dtype)\n K = torch.rand(3, 3, device=device, dtype=dtype)\n distCoeff = torch.rand(4, device=device, dtype=dtype)\n pointsu = undistort_points(points, K, distCoeff)\n assert points.shape == pointsu.shape\n\n points = torch.rand(1, 1, 2, device=device, dtype=dtype)\n K = torch.rand(1, 3, 3, device=device, dtype=dtype)\n distCoeff = torch.rand(1, 4, device=device, dtype=dtype)\n pointsu = undistort_points(points, K, distCoeff)\n assert points.shape == pointsu.shape\n\n @pytest.mark.parametrize(\n \"batch_size, num_points, num_distcoeff\", [(1, 3, 4), (2, 4, 5), (3, 5, 8), (4, 6, 12), (5, 7, 14)]\n )\n def test_shape(self, batch_size, num_points, num_distcoeff, device, dtype):\n B, N, Ndist = batch_size, num_points, num_distcoeff\n\n points = torch.rand(B, N, 2, device=device, dtype=dtype)\n K = torch.rand(B, 3, 3, device=device, dtype=dtype)\n distCoeff = torch.rand(B, Ndist, device=device, dtype=dtype)\n\n pointsu = undistort_points(points, K, distCoeff)\n assert points.shape == (B, N, 2)\n\n def test_opencv_five_coeff(self, device, dtype):\n # Test using 5 distortion coefficients\n pts = torch.tensor(\n [[1028.0374, 788.7520], [1025.1218, 716.8726], [1022.1792, 645.1857]], device=device, dtype=dtype\n )\n\n K = torch.tensor(\n [[1.7315e03, 0.0000e00, 6.2289e02], [0.0000e00, 1.7320e03, 5.3537e02], [0.0000e00, 0.0000e00, 1.0000e00]],\n device=device,\n dtype=dtype,\n )\n\n dist = torch.tensor([-0.1007, 0.2650, -0.0018, 0.0007, -0.2597], device=device, dtype=dtype)\n\n # Expected output generated with OpenCV:\n # import cv2\n # ptsu_expected = cv2.undistortPoints(pts.numpy().reshape(-1,1,2), K.numpy(),\n # dist1.numpy(), None, None, K.numpy()).reshape(-1,2)\n ptsu_expected = torch.tensor(\n [[1030.5992, 790.65533], [1027.3059, 718.10020], [1024.0700, 645.90600]], device=device, dtype=dtype\n )\n ptsu = undistort_points(pts, K, dist)\n assert_close(ptsu, ptsu_expected, rtol=1e-4, atol=1e-4)\n\n def test_opencv_all_coeff(self, device, dtype):\n # Test using 14 distortion coefficients\n pts = torch.tensor(\n [[1028.0374, 788.7520], [1025.1218, 716.8726], [1022.1792, 645.1857]], device=device, dtype=dtype\n )\n\n K = torch.tensor(\n [[1.7315e03, 0.0000e00, 6.2289e02], [0.0000e00, 1.7320e03, 5.3537e02], [0.0000e00, 0.0000e00, 1.0000e00]],\n device=device,\n dtype=dtype,\n )\n\n dist = torch.tensor(\n [\n -5.6388e-02,\n 2.3881e-01,\n 8.3374e-02,\n 2.0710e-03,\n 7.1349e00,\n 5.6335e-02,\n -3.1738e-01,\n 4.9981e00,\n -4.0287e-03,\n -2.8246e-02,\n -8.6064e-02,\n 1.5543e-02,\n -1.7322e-01,\n 2.3154e-03,\n ],\n device=device,\n dtype=dtype,\n )\n\n # Expected output generated with OpenCV:\n # import cv2\n # ptsu_expected = cv2.undistortPoints(pts.numpy().reshape(-1,1,2), K.numpy(),\n # dist2.numpy(), None, None, K.numpy()).reshape(-1,2)\n ptsu_expected = torch.tensor(\n [[1030.8245, 786.3807], [1027.5505, 715.0732], [1024.2753, 644.0319]], device=device, dtype=dtype\n )\n ptsu = undistort_points(pts, K, dist)\n assert_close(ptsu, ptsu_expected, rtol=1e-4, atol=1e-4)\n\n def test_opencv_stereo(self, device, dtype):\n # Udistort stereo points with data given in two batches using 14 distortion coefficients\n pts = torch.tensor(\n [\n [[1028.0374, 788.7520], [1025.1218, 716.8726], [1022.1792, 645.1857]],\n 
[[345.9135, 847.9113], [344.0880, 773.9890], [342.2381, 700.3029]],\n ],\n device=device,\n dtype=dtype,\n )\n\n K = torch.tensor(\n [\n [\n [3.3197e03, 0.0000e00, 6.1813e02],\n [0.0000e00, 3.3309e03, 5.2281e02],\n [0.0000e00, 0.0000e00, 1.0000e00],\n ],\n [\n [1.9206e03, 0.0000e00, 6.1395e02],\n [0.0000e00, 1.9265e03, 7.7164e02],\n [0.0000e00, 0.0000e00, 1.0000e00],\n ],\n ],\n device=device,\n dtype=dtype,\n )\n\n dist = torch.tensor(\n [\n [\n -5.6388e-02,\n 2.3881e-01,\n 8.3374e-02,\n 2.0710e-03,\n 7.1349e00,\n 5.6335e-02,\n -3.1738e-01,\n 4.9981e00,\n -4.0287e-03,\n -2.8246e-02,\n -8.6064e-02,\n 1.5543e-02,\n -1.7322e-01,\n 2.3154e-03,\n ],\n [\n 1.4050e-03,\n -3.0691e00,\n -1.0209e-01,\n -2.3687e-02,\n -1.7082e02,\n 4.3593e-03,\n -3.1904e00,\n -1.7050e02,\n 1.7854e-02,\n 1.8999e-02,\n 9.9122e-02,\n 3.6675e-02,\n 3.0816e-03,\n -5.7133e-02,\n ],\n ],\n device=device,\n dtype=dtype,\n )\n\n # Expected output generated with OpenCV:\n # import cv2\n # ptsu_expected1 = cv2.undistortPoints(pts[0].numpy().reshape(-1,1,2), K[0].numpy(),\n # dist[0].numpy(), None, None, K[0].numpy()).reshape(-1,2)\n # ptsu_expected2 = cv2.undistortPoints(pts[1].numpy().reshape(-1,1,2), K[1].numpy(),\n # dist[1].numpy(), None, None, K[1].numpy()).reshape(-1,2)\n ptsu_expected1 = torch.tensor(\n [[1029.3234, 785.4813], [1026.1599, 714.3689], [1023.02045, 643.5359]], device=device, dtype=dtype\n )\n\n ptsu_expected2 = torch.tensor(\n [[344.04456, 848.7696], [344.27606, 774.1254], [344.47018, 700.8522]], device=device, dtype=dtype\n )\n\n ptsu = undistort_points(pts, K, dist)\n assert_close(ptsu[0], ptsu_expected1, rtol=1e-4, atol=1e-4)\n assert_close(ptsu[1], ptsu_expected2, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device):\n points = torch.rand(1, 8, 2, device=device, dtype=torch.float64, requires_grad=True)\n K = torch.rand(1, 3, 3, device=device, dtype=torch.float64)\n distCoeff = torch.rand(1, 4, device=device, dtype=torch.float64)\n\n assert gradcheck(undistort_points, (points, K, distCoeff), raise_exception=True)\n" ]
[ [ "torch.autograd.gradcheck", "torch.rand", "torch.tensor" ] ]
ZhaoZhangZZlab/eccDNA_formation_2021
[ "35bd3db1ab475bd440d7fe20c0856002f2cc73ba" ]
[ "Script/TEinsertion_convert.py" ]
[ "import pandas as pd\nimport os\nimport argparse\nimport re\nfrom Bio import SeqIO\nimport pysam\n\npd.set_option(\"display.max_columns\",40)\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"-bam\",\"--bamFile\")\nargs=parser.parse_args()\n\nbamFile=args.bamFile\n\n\nfrom TEinsertion_bamConverter import bamConverter\nbamConverter().ConverAlignment(bamFile)\n\n" ]
[ [ "pandas.set_option" ] ]
dbanys/glide-text2im
[ "9cc8e563851bd38f5ddb3e305127192cb0f02f5c", "5177545ec62f1fddc3075a8a69b63df3eb2256a5" ]
[ "glide_text2im/gaussian_diffusion.py", "notebooks/run_inpaint.py" ]
[ "\"\"\"\nSimplified from https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py.\n\"\"\"\n\nimport math\n\nimport numpy as np\nimport torch as th\n\n\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\n betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)\n warmup_time = int(num_diffusion_timesteps * warmup_frac)\n betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)\n return betas\n\n\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):\n \"\"\"\n This is the deprecated API for creating beta schedules.\n\n See get_named_beta_schedule() for the new library of schedules.\n \"\"\"\n if beta_schedule == \"quad\":\n betas = (\n np.linspace(\n beta_start ** 0.5,\n beta_end ** 0.5,\n num_diffusion_timesteps,\n dtype=np.float64,\n )\n ** 2\n )\n elif beta_schedule == \"linear\":\n betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)\n elif beta_schedule == \"warmup10\":\n betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)\n elif beta_schedule == \"warmup50\":\n betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)\n elif beta_schedule == \"const\":\n betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)\n elif beta_schedule == \"jsd\": # 1/T, 1/(T-1), 1/(T-2), ..., 1\n betas = 1.0 / np.linspace(\n num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64\n )\n else:\n raise NotImplementedError(beta_schedule)\n assert betas.shape == (num_diffusion_timesteps,)\n return betas\n\n\ndef get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\n \"\"\"\n Get a pre-defined beta schedule for the given name.\n\n The beta schedule library consists of beta schedules which remain similar\n in the limit of num_diffusion_timesteps.\n Beta schedules may be added, but should not be removed or changed once\n they are committed to maintain backwards compatibility.\n \"\"\"\n if schedule_name == \"linear\":\n # Linear schedule from Ho et al, extended to work for any number of\n # diffusion steps.\n scale = 1000 / num_diffusion_timesteps\n return get_beta_schedule(\n \"linear\",\n beta_start=scale * 0.0001,\n beta_end=scale * 0.02,\n num_diffusion_timesteps=num_diffusion_timesteps,\n )\n elif schedule_name == \"squaredcos_cap_v2\":\n return betas_for_alpha_bar(\n num_diffusion_timesteps,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n else:\n raise NotImplementedError(f\"unknown beta schedule: {schedule_name}\")\n\n\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n \"\"\"\n Create a beta schedule that discretizes the given alpha_t_bar function,\n which defines the cumulative product of (1-beta) over time from t = [0,1].\n\n :param num_diffusion_timesteps: the number of betas to produce.\n :param alpha_bar: a lambda that takes an argument t from 0 to 1 and\n produces the cumulative product of (1-beta) up to that\n part of the diffusion process.\n :param max_beta: the maximum beta to use; use values lower than 1 to\n prevent singularities.\n \"\"\"\n betas = []\n for i in range(num_diffusion_timesteps):\n t1 = i / num_diffusion_timesteps\n t2 = (i + 1) / num_diffusion_timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))\n return np.array(betas)\n\n\nclass GaussianDiffusion:\n \"\"\"\n Utilities for training and sampling diffusion models.\n\n Original ported from this codebase:\n 
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42\n\n :param betas: a 1-D numpy array of betas for each diffusion timestep,\n starting at T and going to 1.\n \"\"\"\n\n def __init__(\n self,\n *,\n betas,\n ):\n # Use float64 for accuracy.\n betas = np.array(betas, dtype=np.float64)\n self.betas = betas\n assert len(betas.shape) == 1, \"betas must be 1-D\"\n assert (betas > 0).all() and (betas <= 1).all()\n\n self.num_timesteps = int(betas.shape[0])\n\n alphas = 1.0 - betas\n self.alphas_cumprod = np.cumprod(alphas, axis=0)\n self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])\n self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)\n assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)\n self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)\n self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)\n self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)\n self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n self.posterior_variance = (\n betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)\n )\n # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain\n self.posterior_log_variance_clipped = np.log(\n np.append(self.posterior_variance[1], self.posterior_variance[1:])\n )\n self.posterior_mean_coef1 = (\n betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)\n )\n self.posterior_mean_coef2 = (\n (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)\n )\n\n def q_mean_variance(self, x_start, t):\n \"\"\"\n Get the distribution q(x_t | x_0).\n\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n \"\"\"\n mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)\n return mean, variance, log_variance\n\n def q_sample(self, x_start, t, noise=None):\n \"\"\"\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). 
Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise\n )\n\n def q_posterior_mean_variance(self, x_start, x_t, t):\n \"\"\"\n Compute the mean and variance of the diffusion posterior:\n\n q(x_{t-1} | x_t, x_0)\n\n \"\"\"\n assert x_start.shape == x_t.shape\n posterior_mean = (\n _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n assert (\n posterior_mean.shape[0]\n == posterior_variance.shape[0]\n == posterior_log_variance_clipped.shape[0]\n == x_start.shape[0]\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n model_output = model(x, t, **model_kwargs)\n if isinstance(model_output, tuple):\n model_output, extra = model_output\n else:\n extra = None\n\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n return x.clamp(-1, 1)\n return x\n\n pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))\n model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)\n\n assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n \"extra\": extra,\n }\n\n def _predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps\n )\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart\n ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. 
(2015).\n \"\"\"\n gradient = cond_fn(x, t, **model_kwargs)\n new_mean = p_mean_var[\"mean\"].float() + p_mean_var[\"variance\"] * gradient.float()\n return new_mean\n\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n \"\"\"\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n \"\"\"\n alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)\n\n eps = self._predict_eps_from_xstart(x, t, p_mean_var[\"pred_xstart\"])\n eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)\n\n out = p_mean_var.copy()\n out[\"pred_xstart\"] = self._predict_xstart_from_eps(x, t, eps)\n out[\"mean\"], _, _ = self.q_posterior_mean_variance(x_start=out[\"pred_xstart\"], x_t=x, t=t)\n return out\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n \"\"\"\n Sample x_{t-1} from the model at the given timestep.\n\n :param model: the model to sample from.\n :param x: the current tensor at x_{t-1}.\n :param t: the value of t, starting at 0 for the first diffusion step.\n :param clip_denoised: if True, clip the x_start prediction to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict containing the following keys:\n - 'sample': a random sample from the model.\n - 'pred_xstart': a prediction of x_0.\n \"\"\"\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = th.randn_like(x)\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n if cond_fn is not None:\n out[\"mean\"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n \"\"\"\n Generate samples from the model.\n\n :param model: the model module.\n :param shape: the shape of the samples, (N, C, H, W).\n :param noise: if specified, the noise from the encoder to sample.\n Should be of the same shape as `shape`.\n :param clip_denoised: if True, clip x_start predictions to [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample.\n :param cond_fn: if not None, this is a gradient function that acts\n similarly to the model.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :param device: if specified, the device to create the samples on.\n If not specified, use a model parameter's device.\n :param progress: if True, show a tqdm progress bar.\n :return: a non-differentiable batch of samples.\n \"\"\"\n final = None\n for sample in self.p_sample_loop_progressive(\n model,\n shape,\n noise=noise,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n device=device,\n progress=progress,\n ):\n final = sample\n return final[\"sample\"]\n\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n \"\"\"\n Generate samples from the model and yield intermediate samples from\n each timestep of diffusion.\n\n Arguments are the same as p_sample_loop().\n Returns a generator over dicts, where each dict is the return value of\n p_sample().\n \"\"\"\n if device is None:\n device = next(model.parameters()).device\n assert isinstance(shape, (tuple, list))\n if noise is not None:\n img = noise\n else:\n img = th.randn(*shape, device=device)\n indices = list(range(self.num_timesteps))[::-1]\n\n if progress:\n # Lazy import so that we don't depend on tqdm.\n from tqdm.auto import tqdm\n\n indices = tqdm(indices)\n\n for i in indices:\n t = th.tensor([i] * shape[0], device=device)\n with th.no_grad():\n out = self.p_sample(\n model,\n img,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n )\n yield out\n img = out[\"sample\"]\n\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n \"\"\"\n Sample x_{t-1} from the model using DDIM.\n\n Same usage as p_sample().\n \"\"\"\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n if cond_fn is not None:\n out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)\n\n # Usually our model outputs epsilon, but we re-derive it\n # in case we used x_start or x_prev prediction.\n eps = self._predict_eps_from_xstart(x, t, out[\"pred_xstart\"])\n\n alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)\n alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)\n sigma = (\n eta\n * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * th.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n # Equation 12.\n noise = th.randn_like(x)\n mean_pred = (\n out[\"pred_xstart\"] * th.sqrt(alpha_bar_prev)\n + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps\n )\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n sample = mean_pred + nonzero_mask * sigma * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n \"\"\"\n Sample x_{t+1} from the model using DDIM reverse ODE.\n \"\"\"\n assert eta == 0.0, \"Reverse ODE only for deterministic path\"\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n if cond_fn is not None:\n out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)\n # Usually our model outputs epsilon, but we re-derive it\n # in 
case we used x_start or x_prev prediction.\n eps = (\n _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x\n - out[\"pred_xstart\"]\n ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)\n alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)\n\n # Equation 12. reversed\n mean_pred = out[\"pred_xstart\"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps\n\n return {\"sample\": mean_pred, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n \"\"\"\n Generate samples from the model using DDIM.\n\n Same usage as p_sample_loop().\n \"\"\"\n final = None\n for sample in self.ddim_sample_loop_progressive(\n model,\n shape,\n noise=noise,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n device=device,\n progress=progress,\n eta=eta,\n ):\n final = sample\n return final[\"sample\"]\n\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n \"\"\"\n Use DDIM to sample from the model and yield intermediate samples from\n each timestep of DDIM.\n\n Same usage as p_sample_loop_progressive().\n \"\"\"\n if device is None:\n device = next(model.parameters()).device\n assert isinstance(shape, (tuple, list))\n if noise is not None:\n img = noise\n else:\n img = th.randn(*shape, device=device)\n indices = list(range(self.num_timesteps))[::-1]\n\n if progress:\n # Lazy import so that we don't depend on tqdm.\n from tqdm.auto import tqdm\n\n indices = tqdm(indices)\n\n for i in indices:\n t = th.tensor([i] * shape[0], device=device)\n with th.no_grad():\n out = self.ddim_sample(\n model,\n img,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n eta=eta,\n )\n yield out\n img = out[\"sample\"]\n\n\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\n \"\"\"\n Extract values from a 1-D numpy array for a batch of indices.\n\n :param arr: the 1-D numpy array.\n :param timesteps: a tensor of indices into the array to extract.\n :param broadcast_shape: a larger shape of K dimensions with the batch\n dimension equal to the length of timesteps.\n :return: a tensor of shape [batch_size, 1, ...] 
where the shape has K dims.\n \"\"\"\n res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()\n while len(res.shape) < len(broadcast_shape):\n res = res[..., None]\n return res + th.zeros(broadcast_shape, device=timesteps.device)\n", "# Run this line to install the package if it is\n# not already installed.\nfrom typing import Tuple\n\nfrom IPython.display import display\nfrom PIL import Image\nimport numpy as np\nimport torch as th\nimport torch.nn.functional as F\nimport argparse\nfrom glide_text2im.download import load_checkpoint\nfrom glide_text2im.model_creation import (\n create_model_and_diffusion,\n model_and_diffusion_defaults,\n model_and_diffusion_defaults_upsampler\n)\n# This notebook supports both CPU and GPU.\n# On CPU, generating one sample may take on the order of 20 minutes.\n# On a GPU, it should be under a minute.\n\nhas_cuda = th.cuda.is_available()\ndevice = th.device('cpu' if not has_cuda else 'cuda')\n# Create base model.\noptions = model_and_diffusion_defaults()\noptions['inpaint'] = True\noptions['use_fp16'] = has_cuda\noptions['timestep_respacing'] = '100' # use 100 diffusion steps for fast sampling\nmodel, diffusion = create_model_and_diffusion(**options)\nmodel.eval()\nif has_cuda:\n model.convert_to_fp16()\nmodel.to(device)\nmodel.load_state_dict(load_checkpoint('base-inpaint', device))\nprint('total base parameters', sum(x.numel() for x in model.parameters()))\n# Create upsampler model.\noptions_up = model_and_diffusion_defaults_upsampler()\noptions_up['inpaint'] = True\noptions_up['use_fp16'] = has_cuda\noptions_up['timestep_respacing'] = 'fast27' # use 27 diffusion steps for very fast sampling\nmodel_up, diffusion_up = create_model_and_diffusion(**options_up)\nmodel_up.eval()\nif has_cuda:\n model_up.convert_to_fp16()\nmodel_up.to(device)\nmodel_up.load_state_dict(load_checkpoint('upsample-inpaint', device))\nprint('total upsampler parameters', sum(x.numel() for x in model_up.parameters()))\ndef show_images(batch: th.Tensor):\n \"\"\" Display a batch of images inline. 
\"\"\"\n scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu()\n reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])\n display(Image.fromarray(reshaped.numpy()))\n\ndef read_image(path: str, size: int = 256) -> Tuple[th.Tensor, th.Tensor]:\n pil_img = Image.open(path).convert('RGB')\n pil_img = pil_img.resize((size, size), resample=Image.BICUBIC)\n img = np.array(pil_img)\n return th.from_numpy(img)[None].permute(0, 3, 1, 2).float() / 127.5 - 1\n\n#sampling parameters\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--prompt\", type=str)\nparser.add_argument(\"--filename\", type=str)\nargs = parser.parse_args()\n\n\nprompt = args.prompt\nfile_name = args.filename\nbatch_size = 1\nguidance_scale = 5.0\n\n# Tune this parameter to control the sharpness of 256x256 images.\n# A value of 1.0 is sharper, but sometimes results in grainy artifacts.\nupsample_temp = 0.997\n\n# Source image we are inpainting\nsource_image_256 = read_image(file_name, size=256)\nsource_image_64 = read_image(file_name, size=64)\n\n# The mask should always be a boolean 64x64 mask, and then we\n# can upsample it for the second stage.\nsource_mask_64 = th.ones_like(source_image_64)[:, :1]\nsource_mask_64[:, :, 20:] = 0\nsource_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')\n\n##############################\n# Sample from the base model #\n##############################\n\n# Create the text tokens to feed to the model.\ntokens = model.tokenizer.encode(prompt)\ntokens, mask = model.tokenizer.padded_tokens_and_mask(\n tokens, options['text_ctx']\n)\n\n# Create the classifier-free guidance tokens (empty)\nfull_batch_size = batch_size * 2\nuncond_tokens, uncond_mask = model.tokenizer.padded_tokens_and_mask(\n [], options['text_ctx']\n)\n\n# Pack the tokens together into model kwargs.\nmodel_kwargs = dict(\n tokens=th.tensor(\n [tokens] * batch_size + [uncond_tokens] * batch_size, device=device\n ),\n mask=th.tensor(\n [mask] * batch_size + [uncond_mask] * batch_size,\n dtype=th.bool,\n device=device,\n ),\n\n # Masked inpainting image\n inpaint_image=(source_image_64 * source_mask_64).repeat(full_batch_size, 1, 1, 1).to(device),\n inpaint_mask=source_mask_64.repeat(full_batch_size, 1, 1, 1).to(device),\n)\n\n# Create an classifier-free guidance sampling function\ndef model_fn(x_t, ts, **kwargs):\n half = x_t[: len(x_t) // 2]\n combined = th.cat([half, half], dim=0)\n model_out = model(combined, ts, **kwargs)\n eps, rest = model_out[:, :3], model_out[:, 3:]\n cond_eps, uncond_eps = th.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)\n eps = th.cat([half_eps, half_eps], dim=0)\n return th.cat([eps, rest], dim=1)\n\ndef denoised_fn(x_start):\n # Force the model to have the exact right x_start predictions\n # for the part of the image which is known.\n return (\n x_start * (1 - model_kwargs['inpaint_mask'])\n + model_kwargs['inpaint_image'] * model_kwargs['inpaint_mask']\n )\n\n# Sample from the base model.\nmodel.del_cache()\nsamples = diffusion.p_sample_loop(\n model_fn,\n (full_batch_size, 3, options[\"image_size\"], options[\"image_size\"]),\n device=device,\n clip_denoised=True,\n progress=True,\n model_kwargs=model_kwargs,\n cond_fn=None,\n denoised_fn=denoised_fn,\n)[:batch_size]\nmodel.del_cache()\n\n# Show the output\n##############################\n# Upsample the 64x64 samples #\n##############################\n\ntokens = model_up.tokenizer.encode(prompt)\ntokens, mask = 
model_up.tokenizer.padded_tokens_and_mask(\n tokens, options_up['text_ctx']\n)\n\n# Create the model conditioning dict.\nmodel_kwargs = dict(\n # Low-res image to upsample.\n low_res=((samples+1)*127.5).round()/127.5 - 1,\n\n # Text tokens\n tokens=th.tensor(\n [tokens] * batch_size, device=device\n ),\n mask=th.tensor(\n [mask] * batch_size,\n dtype=th.bool,\n device=device,\n ),\n\n # Masked inpainting image.\n inpaint_image=(source_image_256 * source_mask_256).repeat(batch_size, 1, 1, 1).to(device),\n inpaint_mask=source_mask_256.repeat(batch_size, 1, 1, 1).to(device),\n)\n\ndef denoised_fn(x_start):\n # Force the model to have the exact right x_start predictions\n # for the part of the image which is known.\n return (\n x_start * (1 - model_kwargs['inpaint_mask'])\n + model_kwargs['inpaint_image'] * model_kwargs['inpaint_mask']\n )\n\n# Sample from the base model.\nmodel_up.del_cache()\nup_shape = (batch_size, 3, options_up[\"image_size\"], options_up[\"image_size\"])\nup_samples = diffusion_up.p_sample_loop(\n model_up,\n up_shape,\n noise=th.randn(up_shape, device=device) * upsample_temp,\n device=device,\n clip_denoised=True,\n progress=True,\n model_kwargs=model_kwargs,\n cond_fn=None,\n denoised_fn=denoised_fn,\n)[:batch_size]\nmodel_up.del_cache()\nfile_name = 'inpaint.jpeg'\n\ndef save_images(batch: th.Tensor):\n \"\"\" save batch of images \"\"\"\n scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu()\n reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])\n img = Image.fromarray(reshaped.numpy())\n img.save(str(file_name))\n\n\n\nsave_images(up_samples)" ]
[ [ "torch.randn_like", "numpy.log", "numpy.sqrt", "numpy.linspace", "torch.zeros", "torch.sqrt", "torch.randn", "torch.from_numpy", "numpy.ones", "torch.exp", "torch.tensor", "numpy.cumprod", "numpy.append", "torch.no_grad", "torch.split", "numpy.array" ], [ "torch.cat", "torch.randn", "torch.from_numpy", "torch.tensor", "torch.cuda.is_available", "torch.nn.functional.interpolate", "torch.device", "numpy.array", "torch.ones_like" ] ]
codema-dev/seai_deap
[ "52b67582beac8d8a2b46b5991970b6ad6695f7b3" ]
[ "tests/test_fab.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom seai_deap import fab\n\n\ndef test_calculate_thermal_bridging() -> None:\n\n expected_output = np.array(0.75)\n\n output = fab.calculate_thermal_bridging(\n wall_area=np.array(1),\n roof_area=np.array(1),\n floor_area=np.array(1),\n window_area=np.array(1),\n door_area=np.array(1),\n thermal_bridging_factor=np.array(0.15),\n )\n\n assert_array_equal(output, expected_output)\n\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.array" ] ]
patrick-g-zhang/MPNet
[ "7788b5883c8d3037215d7c0b939b6e9b6e5f4bca" ]
[ "pretraining/setup.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\n\nif sys.version_info < (3,):\n sys.exit('Sorry, Python3 is required for fairseq.')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\n\nif sys.platform == 'darwin':\n extra_compile_args = ['-stdlib=libc++', '-O3']\nelse:\n extra_compile_args = ['-std=c++11', '-O3']\n\n\nclass NumpyExtension(Extension):\n \"\"\"Source: https://stackoverflow.com/a/54128391\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.__include_dirs = []\n super().__init__(*args, **kwargs)\n\n @property\n def include_dirs(self):\n import numpy\n return self.__include_dirs + [numpy.get_include()]\n\n @include_dirs.setter\n def include_dirs(self, dirs):\n self.__include_dirs = dirs\n\n\nextensions = [\n Extension(\n 'fairseq.libbleu',\n sources=[\n 'fairseq/clib/libbleu/libbleu.cpp',\n 'fairseq/clib/libbleu/module.cpp',\n ],\n extra_compile_args=extra_compile_args,\n ),\n NumpyExtension(\n 'fairseq.data.data_utils_fast',\n sources=['fairseq/data/data_utils_fast.pyx'],\n language='c++',\n extra_compile_args=extra_compile_args,\n ),\n NumpyExtension(\n 'fairseq.data.token_block_utils_fast',\n sources=['fairseq/data/token_block_utils_fast.pyx'],\n language='c++',\n extra_compile_args=extra_compile_args,\n ),\n NumpyExtension(\n 'fairseq.data.permutation_utils',\n sources=['fairseq/data/permutation_utils.pyx'],\n language='c++',\n extra_compile_args=extra_compile_args,\n ),\n]\n\n\nsetup(\n name='fairseq',\n version='0.8.0',\n description='Facebook AI Research Sequence-to-Sequence Toolkit',\n url='https://github.com/pytorch/fairseq',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n long_description=readme,\n long_description_content_type='text/markdown',\n setup_requires=[\n 'cython',\n 'numpy',\n 'setuptools>=18.0',\n ],\n install_requires=[\n 'cffi',\n 'cython',\n 'fastBPE',\n 'numpy',\n 'regex',\n 'sacrebleu',\n 'torch',\n 'tqdm',\n ],\n packages=find_packages(exclude=['scripts', 'tests']),\n ext_modules=extensions,\n test_suite='tests',\n entry_points={\n 'console_scripts': [\n 'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',\n 'fairseq-generate = fairseq_cli.generate:cli_main',\n 'fairseq-interactive = fairseq_cli.interactive:cli_main',\n 'fairseq-preprocess = fairseq_cli.preprocess:cli_main',\n 'fairseq-score = fairseq_cli.score:main',\n 'fairseq-train = fairseq_cli.train:cli_main',\n 'fairseq-validate = fairseq_cli.validate:cli_main',\n ],\n },\n zip_safe=False,\n)\n" ]
[ [ "numpy.get_include" ] ]
alexacarlson/pggan-pytorch
[ "96600b13dbda732b7c2737ee0e15c47bc239d7ca" ]
[ "dataloader.py" ]
[ "import os\nimport torch as torch\nimport numpy as np\nfrom io import BytesIO\nimport scipy.misc\n#import tensorflow as tf\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\nfrom torch.autograd import Variable\n#from matplotlib import pyplot as plt\nfrom PIL import Image\n\n\nclass dataloader:\n def __init__(self, config):\n self.root = config.train_data_root\n self.batch_table = {4:32, 8:32, 16:32, 32:16, 64:16, 128:16, 256:12, 512:3, 1024:1} # change this according to available gpu memory.\n self.batchsize = int(self.batch_table[pow(2,2)]) # we start from 2^2=4\n self.imsize = int(pow(2,2))\n self.num_workers = 4\n \n def renew(self, resl):\n print('[*] Renew dataloader configuration, load data from {}.'.format(self.root))\n \n self.batchsize = int(self.batch_table[pow(2,resl)])\n self.imsize = int(pow(2,resl))\n self.dataset = ImageFolder(\n root=self.root,\n transform=transforms.Compose( [\n transforms.Resize(size=(self.imsize,self.imsize), interpolation=Image.NEAREST),\n transforms.ToTensor(),\n ]))\n\n self.dataloader = DataLoader(\n dataset=self.dataset,\n batch_size=self.batchsize,\n shuffle=True,\n num_workers=self.num_workers\n )\n\n def __iter__(self):\n return iter(self.dataloader)\n \n def __next__(self):\n return next(self.dataloader)\n\n def __len__(self):\n return len(self.dataloader.dataset)\n\n \n def get_batch(self):\n dataIter = iter(self.dataloader)\n return next(dataIter)[0].mul(2).add(-1) # pixel range [-1, 1]\n\n\n \n\n\n\n\n\n\n\n\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
mcgibbon/marble
[ "801abdf65e112203d2b3c8983b0f73b0a4c821da" ]
[ "examples/initialization.py" ]
[ "import xarray as xr\nimport sympl\nimport numpy as np\nfrom marble import InputHeightToPrincipalComponents, convert_height_to_principal_components\nimport os\n\nheight_to_pc = InputHeightToPrincipalComponents()\n\ndata_path = os.path.join(\n os.path.dirname(\n os.path.realpath(__file__)\n ),\n 'data',\n)\n\ncolumn_filename = os.path.join(data_path, 'era5_column-2016.nc')\n\n\ndef convert_dataarray_to_sympl(dict_of_dataarray):\n for name, array in dict_of_dataarray.items():\n if isinstance(array, xr.DataArray):\n dict_of_dataarray[name] = sympl.DataArray(array)\n\n\ndef get_era5_state(latent_filename, latent=True, i_timestep=0):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['total_water_mixing_ratio'] = ds['rt'][i_timestep, :]\n state['total_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy'] = ds['sl'][i_timestep, :]\n state['liquid_water_static_energy'].attrs['units'] = 'J/kg'\n state['height'] = sympl.DataArray(\n np.linspace(0, 3000., 20),\n dims=['z_star'],\n attrs={'units': 'm'},\n )\n state['time'] = sympl.timedelta(0)\n if latent:\n state['vertical_wind'] = ds['w'][i_timestep, :]\n state['vertical_wind'].attrs['units'] = 'm/s'\n convert_dataarray_to_sympl(state)\n if latent:\n state = height_to_pc(state)\n state.pop('vertical_wind_components')\n state['time'] = sympl.timedelta(0)\n return state\n\n\ndef get_era5_forcing(latent_filename, i_timestep, latent=True):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['surface_latent_heat_flux'] = ds['lhf'][i_timestep] / 3600. # divide by one hour to go from J/m^2 to W/m^2\n state['surface_latent_heat_flux'].attrs['units'] = 'W/m^2'\n state['surface_sensible_heat_flux'] = ds['shf'][i_timestep] / 3600.\n state['surface_sensible_heat_flux'].attrs['units'] = 'W/m^2'\n state['surface_temperature'] = ds['sst'][i_timestep]\n state['surface_temperature'].attrs['units'] = 'degK'\n state['surface_air_pressure'] = ds['p_surface'][i_timestep]\n state['surface_air_pressure'].attrs['units'] = 'Pa'\n state['vertical_wind'] = ds['w'][i_timestep, :]\n state['vertical_wind'].attrs['units'] = 'm/s'\n state['liquid_water_static_energy_horizontal_advective_tendency'] = ds['sl_adv'][i_timestep, :]\n state['total_water_mixing_ratio_horizontal_advective_tendency'] = ds['rt_adv'][i_timestep, :]\n state['downwelling_shortwave_radiation_at_3km'] = ds['swdn_tod'][i_timestep]\n state['downwelling_shortwave_radiation_at_3km'].attrs['units'] = 'W/m^2'\n state['downwelling_shortwave_radiation_at_top_of_atmosphere'] = ds['swdn_toa'][i_timestep]\n state['downwelling_shortwave_radiation_at_top_of_atmosphere'].attrs['units'] = 'W/m^2'\n state['mid_cloud_fraction'] = ds['cldmid'][i_timestep]\n state['mid_cloud_fraction'].attrs['units'] = ''\n state['high_cloud_fraction'] = ds['cldhigh'][i_timestep]\n state['high_cloud_fraction'].attrs['units'] = ''\n state['total_water_mixing_ratio_at_3km'] = ds['rt'][i_timestep, -1]\n state['total_water_mixing_ratio_at_3km'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy_at_3km'] = ds['sl'][i_timestep, -1]\n state['liquid_water_static_energy_at_3km'].attrs['units'] = 'J/kg'\n state['rain_water_mixing_ratio_at_3km'] = ds['rrain'][i_timestep, -1]\n state['rain_water_mixing_ratio_at_3km'].attrs['units'] = 'kg/kg'\n if latent:\n state['total_water_mixing_ratio'] = ds['rt'][i_timestep, :]\n state['total_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy'] = ds['sl'][i_timestep, :]\n state['liquid_water_static_energy'].attrs['units'] = 
'J/kg'\n convert_dataarray_to_sympl(state)\n if latent:\n state['liquid_water_static_energy_components_horizontal_advective_tendency'] = \\\n sympl.DataArray(\n convert_height_to_principal_components(\n state['liquid_water_static_energy_horizontal_advective_tendency'],\n basis_name='sl', subtract_mean=False\n ), dims=['sl_latent'], attrs={'units': 's^-1'}\n )\n state['total_water_mixing_ratio_components_horizontal_advective_tendency'] = \\\n sympl.DataArray(\n convert_height_to_principal_components(\n state['total_water_mixing_ratio_horizontal_advective_tendency'],\n basis_name='rt', subtract_mean=False\n ), dims=['rt_latent'], attrs={'units': 's^-1'}\n )\n pc_state = {}\n pc_state.update(state)\n pc_state['time'] = sympl.timedelta(0)\n pc_state = height_to_pc(pc_state)\n pc_state.pop('total_water_mixing_ratio_components')\n pc_state.pop('liquid_water_static_energy_components')\n state.update(pc_state)\n return state\n\n\ndef get_era5_diagnostics(latent_filename, i_timestep):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['cloud_fraction'] = ds['cld'][i_timestep, :]\n state['cloud_fraction'].attrs['units'] = ''\n state['surface_precipitation_rate'] = ds['precip'][i_timestep]\n state['surface_precipitation_rate'].attrs['units'] = 'mm/hr'\n state['rain_water_mixing_ratio'] = ds['rrain'][i_timestep, :]\n state['rain_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['cloud_water_mixing_ratio'] = ds['rcld'][i_timestep, :]\n state['cloud_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['clear_sky_radiative_heating_rate'] = ds['sl_rad_clr'][i_timestep, :]\n state['clear_sky_radiative_heating_rate'].attrs['units'] = 'degK/hr'\n state['low_cloud_fraction'] = ds['cldlow'][i_timestep]\n state['low_cloud_fraction'].attrs['units'] = ''\n state['column_cloud_water'] = ds['ccw'][i_timestep]\n state['column_cloud_water'].attrs['units'] = 'kg/m^2'\n convert_dataarray_to_sympl(state)\n return state\n" ]
[ [ "numpy.linspace" ] ]