repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
iamharsha1999/theganbibliotheca
|
[
"c764af367e850555d04d6760b6bca756737670d3"
] |
[
"ChromaGAN/train.py"
] |
[
"import torch \nfrom torch.autograd import grad \nfrom tqdm import tqdm\nfrom torch.optim import Adam\nimport torch.nn as nn\nimport pretrainedmodels\nimport cv2\n\nclass Trainer():\n\n def __init__(self, gen , dis,data, fixed_gray_images,device = 'cuda'):\n\n self.device = device \n self.generator = gen \n self.discriminator = dis\n self.vgg_model = pretrainedmodels.__dict__['vgg16'](pretrained = 'imagenet').to(device).eval()\n for param in self.vgg_model.parameters():\n param.requires_grad = False\n\n ## Device Initialization\n if torch.cuda.is_available():\n self.device = torch.device('cuda')\n print('Device Name:', torch.cuda.get_device_name(0))\n else:\n self.device = torch.device('cpu')\n \n self.generator = self.generator.to(self.device)\n self.discriminator = self.discriminator.to(self.device)\n\n self.epochs = 5\n self.batch_size = 10\n self.gen_optimizer = Adam(self.gen.parameters(), lr = 2e-5, betas=(0.5, 0.999))\n self.dis_optimizer = Adam(self.dis.parameters(), lr = 2e-5, betas=(0.5, 0.999))\n \n self.klloss = nn.KLDivLoss()\n self.mseloss = nn.MSELoss() \n self.dataloader = data \n\n self.gp_weight = 10\n\n self.fixed_noise = fixed_gray_images.to(self.device)\n \n def wgan_loss(self,fake, real ):\n \n return -(torch.mean(real) - torch.mean(fake))\n \n def gradient_penalty(self, real_ab, fake_ab, real_l):\n\n interpolated = self.interpolated_images(real_ab, fake_ab)\n dis_interpolated = self.discriminator(interpolated,real_l)\n\n gradients = grad(outputs = dis_interpolated, inputs=interpolated, grad_outputs=torch.ones(dis_interpolated.size()).to(self.device),\n create_graph=True, retain_graph=True)[0]\n gradients = gradients.view(real_ab.size()[0], -1)\n gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n loss = self.gp_weight * ((gradients_norm - 1) ** 2).mean()\n\n return loss \n \n def interpolated_images(self, x, y):\n\n alpha = torch.randn(x.size()[0],1,1,1).expand_as(x).to(self.device)\n interpolated = alpha * x + (1-alpha) * y\n\n return interpolated\n\n \n def train_gen(self, l, vgg_output, real_ab):\n\n ## Generate images and class output\n img_class, gen_out = self.generator(l)\n \n ## Fake images for discriminator\n dis_out = self.discriminator(gen_out, l)\n\n dis_loss = -dis_out.mean()\n kl_loss = self.klloss(img_class, vgg_output)\n ce_loss = self.mseloss(gen_out, real_ab)\n\n loss = ce_loss + 0.003*kl_loss + dis_loss\n\n\n ## Update the weights\n self.generator.zero_grad()\n loss.backward()\n self.gen_optimizer.step()\n\n return loss \n\n def train_disc(self, real_ab, real_l):\n\n ## Generated Data\n _, fake_ab = self.generator(real_l)\n\n ## Predictions from discriminator for real and fake chrominance channels\n real_dis_out = self.discriminator(real_ab, real_l)\n fake_dis_out = self.discriminator(fake_ab.detach(), real_l)\n\n ## Compute the gradient penalty\n gp = self.gradient_penalty(real_ab, fake_ab, real_l)\n\n ##Update the weights\n self.discriminator.zero_grad()\n loss = self.wgan_loss(fake_dis_out, real_dis_out) + gp \n loss.backward()\n self.dis_optimizer.step()\n\n return loss\n \n def plot_images(self,real_l, no_of_images, epoch_no):\n\n with torch.no_grad():\n _,pred_ab = self.generator(real_l)\n img = torch.cat((real_l,pred_ab), dim =1)\n img = img.to('cpu').numpy()\n \n for i in len(no_of_images):\n img[i] = cv2.cvtColor(img[i], cv2.COLOR_Lab2LBGR)\n plt.imshow(np.transpose(img[i], (1,2,0)), interpolation = 'none')\n plt.savefig('Image_Epoch:{}_{}.png'.format(epoch_no+1,i+1))\n \n def train(self):\n\n self.gen_loss = []\n 
self.dis_loss = []\n\n for epoch in range(self.epochs):\n\n epoch_loss = {'gen_loss':[], 'dis_loss':[]}\n print('[Epoch: {} / {}]'.format(epoch+1, self.epochs))\n \n eg_loss = []\n ed_loss = []\n \n for batch in tqdm(self.dataloader):\n\n real_l,real_ab = batch['l'].to(self.device),batch['ab'].to(self.device)\n \n ## Retrive the Batch Size\n bs = real_l.size(0)\n\n ## VGG class prediction for real grayscale image\n output_vgg = self.vgg_model(real_l.repeat(1,3,1,1))\n\n ## Update the generator\n gen_loss = self.train_gen(real_l, output_vgg, real_ab)\n eg_loss.append(gen_loss)\n\n ## Update the discrminator\n dis_loss = self.train_disc(real_ab, real_l)\n ed_loss.append(dis_loss)\n\n self.gen_loss.append(torch.mean(torch.FloatTensor(eg_loss)))\n self.dis_loss.append(torch.mean(torch.FloatTensor(ed_loss)))\n\n ## Print Epoch Information\n print('[ Generator Loss: {} | Discriminator Loss: {} ] '.format(gen_loss, dis_loss))\n\n ## Plot predicted images to visualize images\n self.plot_images(self.fixed_noise, self.fixed_noise.size()[0], epoch + 1)\n"
] |
[
[
"torch.mean",
"torch.nn.KLDivLoss",
"torch.cat",
"torch.sum",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.device",
"torch.nn.MSELoss"
]
] |
Ericbrod10/Deep-Learning
|
[
"5b0a01597ce19f2da5bf45b76023b898c494f46a"
] |
[
"Assignment09/test.py"
] |
[
"import sys\nimport keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.utils import normalize\nfrom keras.models import *\n\n\n#load in model\nmodel = sys.argv[1]\ngen = load_model(model)\n\n#load noise parameters \nnoise = np.random.normal(loc=0, scale=1, size=[100, 100])\ngenImages = gen.predict(noise)\n\ngenImages = genImages.reshape(100, 28, 28)\n\n\nplt.figure(figsize=(10,10))\n\n\nfor i in range(genImages.shape[0]):\n plt.subplot(10, 10, i+1)\n\n plt.imshow(genImages[i], interpolation='nearest')\n plt.axis('off')\n\nplt.tight_layout()\n\n#load output save name as png and save the figure\noutput = sys.argv[2]\noutput = output + \".png\"\nplt.savefig(output)"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.random.normal",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
] |
n-fallahinia/finger-detection
|
[
"ebc3a5165b34156fa8f2abb44fde5d48b5405f95"
] |
[
"tfrecords_converter.py"
] |
[
"\"\"\" Simple script to convert TensorFlow XML-to-TFRecord \n\nNavid Fallahinia - 21/11/2020\nBioRobotics Lab\n\nusage: generate_tfrecord.py [-h] [-x XML_DIR] [-l LABELS_PATH] [-o OUTPUT_PATH] [-i IMAGE_DIR] [-c CSV_PATH]\n\noptional arguments:\n -h, --help show this help message and exit\n -x XML_DIR, --xml_dir XML_DIR\n Path to the folder where the input .xml files are stored.\n -l LABELS_PATH, --labels_path LABELS_PATH\n Path to the labels (.pbtxt) file.\n -o OUTPUT_PATH, --output_path OUTPUT_PATH\n Path of output TFRecord (.record) file.\n -i IMAGE_DIR, --image_dir IMAGE_DIR\n Path to the folder where the input image files are stored. Defaults to the same directory as XML_DIR.\n -c CSV_PATH, --csv_path CSV_PATH\n Path of output .csv file. If none provided, then no file will be written.\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nimport io\nimport xml.etree.ElementTree as ET\nimport argparse\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)\nimport tensorflow.compat.v1 as tf\nfrom PIL import Image\nfrom object_detection.utils import dataset_util, label_map_util\nfrom collections import namedtuple\n\n# Initiate argument parser\nparser = argparse.ArgumentParser(\n description=\"Sample TensorFlow XML-to-TFRecord converter\")\nparser.add_argument(\"-x\",\n \"--xml_dir\",\n help=\"Path to the folder where the input .xml files are stored.\",\n default='./images/train/annotations',\n type=str)\nparser.add_argument(\"-l\",\n \"--labels_path\",\n default=\"./annotations/label_map.pbtxt\",\n help=\"Path to the labels (.pbtxt) file.\", type=str)\nparser.add_argument(\"-o\",\n \"--output_path\",\n default='./annotations/train.record',\n help=\"Path of output TFRecord (.record) file.\", type=str)\nparser.add_argument(\"-i\",\n \"--image_dir\",\n help=\"Path to the folder where the input image files are stored. \"\n \"Defaults to the same directory as XML_DIR.\",\n type=str, default='./images/train/raw_image')\nparser.add_argument(\"-c\",\n \"--csv_path\",\n help=\"Path of output .csv file. 
If none provided, then no file will be \"\n \"written.\",\n type=str, default=None)\n\nargs = parser.parse_args()\n\nif args.image_dir is None:\n args.image_dir = args.xml_dir\n\nlabel_map = label_map_util.load_labelmap(args.labels_path)\nlabel_map_dict = label_map_util.get_label_map_dict(label_map)\n\n\ndef xml_to_csv(path):\n \"\"\"Iterates through all .xml files (generated by labelImg) in a given directory and combines\n them in a single Pandas dataframe.\n\n Parameters:\n ----------\n path : str\n The path containing the .xml files\n Returns\n -------\n Pandas DataFrame\n The produced dataframe\n \"\"\"\n\n xml_list = []\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height',\n 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n return xml_df\n\n\ndef class_text_to_int(row_label):\n return label_map_dict[row_label]\n\n\ndef split(df, group):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby(group)\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]\n\n\ndef create_tf_example(group, path):\n with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = group.filename.encode('utf8')\n image_format = b'jpg'\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for index, row in group.object.iterrows():\n xmins.append(row['xmin'] / width)\n xmaxs.append(row['xmax'] / width)\n ymins.append(row['ymin'] / height)\n ymaxs.append(row['ymax'] / height)\n classes_text.append(row['class'].encode('utf8'))\n classes.append(class_text_to_int(row['class']))\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example\n\n\ndef main(_):\n\n writer = tf.python_io.TFRecordWriter(args.output_path)\n path = os.path.join(args.image_dir)\n examples = xml_to_csv(args.xml_dir)\n grouped = split(examples, 'filename')\n Idx = 0\n for Idx, group in enumerate(grouped):\n tf_example = create_tf_example(group, path)\n writer.write(tf_example.SerializeToString())\n if Idx%50 == 0:\n print('%d Records are saved'% Idx); \n writer.close()\n print(' Successfully created the TFRecord file: {}'.format(args.output_path))\n if args.csv_path is not 
None:\n examples.to_csv(args.csv_path, index=None)\n print('[INFO] Successfully created the CSV file: {}'.format(args.csv_path))\n\nif __name__ == '__main__':\n tf.app.run()"
] |
[
[
"tensorflow.compat.v1.python_io.TFRecordWriter",
"pandas.DataFrame",
"tensorflow.compat.v1.app.run"
]
] |
kwang-12/basalt-mirror
|
[
"9dd7b2c8031283ec033211bc90ad70aa70323eaa"
] |
[
"python/basalt/latex/util.py"
] |
[
"#\n# BSD 3-Clause License\n#\n# This file is part of the Basalt project.\n# https://gitlab.com/VladyslavUsenko/basalt.git\n#\n# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.\n# All rights reserved.\n#\nimport math\nimport numpy as np\n\n\ndef best_two_non_repeating(array, reverse=False):\n if reverse:\n best = -math.inf\n second = -math.inf\n for v in array:\n if v > best:\n second = best\n best = v\n elif v < best and v > second:\n second = v\n else:\n best = math.inf\n second = math.inf\n for v in array:\n if v < best:\n second = best\n best = v\n elif v > best and v < second:\n second = v\n\n return best, second\n\n\ndef format_ratio(val, val_ref=None, decimals=0):\n if val_ref == 0:\n return \"{}\".format(math.inf)\n else:\n if val_ref is not None:\n val = float(val) / float(val_ref)\n return \"{:.{prec}f}\".format(val, prec=decimals)\n\n\ndef format_ratio_percent(val, val_ref=None, decimals=0):\n if val_ref == 0:\n return \"{}\".format(val)\n else:\n if val_ref is not None:\n val = float(val) / float(val_ref)\n val = 100 * val\n return \"{:.{prec}f}%\".format(val, prec=decimals)\n\n\ndef rotation2d(theta_deg):\n theta = np.radians(theta_deg)\n\n R = np.array(((np.cos(theta), -np.sin(theta)), (np.sin(theta), np.cos(theta))))\n\n return R\n"
] |
[
[
"numpy.radians",
"numpy.cos",
"numpy.sin"
]
] |
Jonahowns/oxTorch_prototype
|
[
"e6c7c393b1100bc8604bd9ffc7a70c9c77e9e531"
] |
[
"cgdms/cgdms.py"
] |
[
"# Differentiable molecular simulation of proteins with a coarse-grained potential\n# Author: Joe G Greener\n\n# biopython, PeptideBuilder and colorama are also imported in functions\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.nn.functional import normalize\n\nfrom itertools import count\nfrom math import pi\nimport os\nfrom random import choices, gauss, random, randrange, shuffle\n\ncgdms_dir = os.path.dirname(os.path.realpath(__file__))\ndataset_dir = os.path.join(cgdms_dir, \"datasets\")\ntrain_val_dir = os.path.join(cgdms_dir, \"protein_data\", \"train_val\")\ntrained_model_file = os.path.join(cgdms_dir, \"cgdms_params_ep45.pt\")\n\nn_bins_pot = 140\nn_bins_force = n_bins_pot - 2\nn_adjacent = 4\n\natoms = [\"N\", \"CA\", \"C\", \"cent\"]\n\n# Last value is the number of atoms in the next residue\nangles = [\n (\"N\", \"CA\", \"C\" , 0), (\"CA\", \"C\" , \"N\" , 1), (\"C\", \"N\", \"CA\", 2),\n (\"N\", \"CA\", \"cent\", 0), (\"C\" , \"CA\", \"cent\", 0),\n]\n\n# Last value is the number of atoms in the next residue\ndihedrals = [\n (\"C\", \"N\", \"CA\", \"C\" , 3), (\"N\" , \"CA\", \"C\", \"N\", 1), (\"CA\", \"C\", \"N\", \"CA\", 2),\n (\"C\", \"N\", \"CA\", \"cent\", 3), (\"cent\", \"CA\", \"C\", \"N\", 1),\n]\n\naas = [\n \"A\", \"R\", \"N\", \"D\", \"C\", \"E\", \"Q\", \"G\", \"H\", \"I\",\n \"L\", \"K\", \"M\", \"F\", \"P\", \"S\", \"T\", \"W\", \"Y\", \"V\",\n]\nn_aas = len(aas)\n\none_to_three_aas = {\n \"C\": \"CYS\", \"D\": \"ASP\", \"S\": \"SER\", \"Q\": \"GLN\", \"K\": \"LYS\",\n \"I\": \"ILE\", \"P\": \"PRO\", \"T\": \"THR\", \"F\": \"PHE\", \"N\": \"ASN\",\n \"G\": \"GLY\", \"H\": \"HIS\", \"L\": \"LEU\", \"R\": \"ARG\", \"W\": \"TRP\",\n \"A\": \"ALA\", \"V\": \"VAL\", \"E\": \"GLU\", \"Y\": \"TYR\", \"M\": \"MET\",\n}\nthree_to_one_aas = {one_to_three_aas[k]: k for k in one_to_three_aas}\n\naa_masses = {\n \"A\": 89.09 , \"R\": 174.2, \"N\": 132.1, \"D\": 133.1, \"C\": 121.2,\n \"E\": 147.1 , \"Q\": 146.1, \"G\": 75.07, \"H\": 155.2, \"I\": 131.2,\n \"L\": 131.2 , \"K\": 146.2, \"M\": 149.2, \"F\": 165.2, \"P\": 115.1,\n \"S\": 105.09, \"T\": 119.1, \"W\": 204.2, \"Y\": 181.2, \"V\": 117.1,\n}\n\nss_types = [\"H\", \"E\", \"C\"]\n\n# Minima in the learned potential after 45 epochs of training\ncentroid_dists = {\n \"A\": 1.5575, \"R\": 4.3575, \"N\": 2.5025, \"D\": 2.5025, \"C\": 2.0825,\n \"E\": 3.3425, \"Q\": 3.3775, \"G\": 1.0325, \"H\": 3.1675, \"I\": 2.3975,\n \"L\": 2.6075, \"K\": 3.8325, \"M\": 3.1325, \"F\": 3.4125, \"P\": 1.9075,\n \"S\": 1.9425, \"T\": 1.9425, \"W\": 3.9025, \"Y\": 3.7975, \"V\": 1.9775,\n}\n\n# Approximate fraction of each amino acid in the PDB\npdb_aa_frequencies = {\n \"A\": 0.0728, \"R\": 0.0571, \"N\": 0.0409, \"D\": 0.0563, \"C\": 0.0237,\n \"E\": 0.0835, \"Q\": 0.0461, \"G\": 0.0608, \"H\": 0.0214, \"I\": 0.0580,\n \"L\": 0.0963, \"K\": 0.0718, \"M\": 0.0220, \"F\": 0.0337, \"P\": 0.0383,\n \"S\": 0.0580, \"T\": 0.0500, \"W\": 0.0121, \"Y\": 0.0300, \"V\": 0.0672,\n}\n\ntrain_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"train.txt\"))]\nval_proteins = [l.rstrip() for l in open(os.path.join(dataset_dir, \"val.txt\" ))]\n\ndef get_bin_centres(min_dist, max_dist):\n gap_dist = (max_dist - min_dist) / n_bins_pot\n bcs_pot = [min_dist + i * gap_dist + 0.5 * gap_dist for i in range(n_bins_pot)]\n return bcs_pot[1:-1]\n\ninteractions = []\ndist_bin_centres = []\n\n# Generate distance interaction list\nfor i, aa_1 in enumerate(aas):\n for ai, atom_1 in enumerate(atoms):\n for atom_2 in 
atoms[(ai + 1):]:\n interactions.append(f\"{aa_1}_{atom_1}_{aa_1}_{atom_2}_same\")\n dist_bin_centres.append(get_bin_centres(0.7, 5.6))\n for aa_2 in aas[i:]:\n for ai, atom_1 in enumerate(atoms):\n atom_iter = atoms if aa_1 != aa_2 else atoms[ai:]\n for atom_2 in atom_iter:\n interactions.append(f\"{aa_1}_{atom_1}_{aa_2}_{atom_2}_other\")\n dist_bin_centres.append(get_bin_centres(1.0, 15.0))\n for aa_2 in aas:\n for atom_1 in atoms:\n for atom_2 in atoms:\n for ar in range(1, n_adjacent + 1):\n interactions.append(f\"{aa_1}_{atom_1}_{aa_2}_{atom_2}_adj{ar}\")\n dist_bin_centres.append(get_bin_centres(0.7, 14.7))\ninteractions.append(\"self_placeholder\") # This gets zeroed out during the simulation\ndist_bin_centres.append([0.0] * n_bins_force)\n\ngap_ang = (pi - pi / 3) / n_bins_pot\nangle_bin_centres = [pi / 3 + i * gap_ang + 0.5 * gap_ang for i in range(n_bins_pot)][1:-1]\n\ngap_dih = (2 * pi) / n_bins_pot\n# Two invisible bins on the end imitate periodicity\ndih_bin_centres = [-pi + i * gap_dih - 0.5 * gap_dih for i in range(n_bins_pot + 2)][1:-1]\n\n# Report a message if it exceeds the verbosity level\ndef report(msg, msg_verbosity=0, verbosity=2):\n if msg_verbosity <= verbosity:\n print(msg)\n\n# Read an input data file\n# The protein sequence is read from the file but will overrule the file if provided\ndef read_input_file(fp, seq=\"\", device=\"cpu\"):\n with open(fp) as f:\n lines = f.readlines()\n if seq == \"\":\n seq = lines[0].rstrip()\n ss_pred = lines[1].rstrip()\n assert len(seq) == len(ss_pred), f\"Sequence length is {len(seq)} but SS prediction length is {len(ss_pred)}\"\n seq_info = []\n for i in range(len(seq)):\n for atom in atoms:\n seq_info.append((i, atom))\n n_atoms = len(seq_info)\n native_coords = torch.tensor(np.loadtxt(fp, skiprows=2), dtype=torch.float,\n device=device).view(n_atoms, 3)\n\n inters = torch.ones(n_atoms, n_atoms, dtype=torch.long, device=device) * -1\n for i in range(n_atoms):\n inters[i, i] = len(interactions) - 1 # Placeholder for same atom\n for j in range(i):\n res_sep = abs(seq_info[i][0] - seq_info[j][0])\n if 1 <= res_sep <= n_adjacent:\n # Due to known ordering we know that the order of residues is j->i\n info_1, info_2 = seq_info[j], seq_info[i]\n else:\n # Sort by amino acid index then by atom\n info_1, info_2 = sorted([seq_info[i], seq_info[j]],\n key=lambda x : (aas.index(seq[x[0]]), atoms.index(x[1])))\n inter = f\"{seq[info_1[0]]}_{info_1[1]}_{seq[info_2[0]]}_{info_2[1]}\"\n if res_sep == 0:\n inter += \"_same\"\n elif res_sep <= n_adjacent:\n inter += f\"_adj{res_sep}\"\n else:\n inter += \"_other\"\n inter_i = interactions.index(inter)\n inters[i, j] = inter_i\n inters[j, i] = inter_i\n inters_flat = inters.view(n_atoms * n_atoms)\n\n masses = []\n for i, r in enumerate(seq):\n mass_CA = 13.0 # Includes H\n mass_N = 15.0 # Includes amide H\n if i == 0:\n mass_N += 2.0 # Add charged N-terminus\n mass_C = 28.0 # Includes carbonyl O\n if i == len(seq) - 1:\n mass_C += 16.0 # Add charged C-terminus\n mass_cent = aa_masses[r] - 74.0 # Subtract non-centroid section\n if r == \"G\":\n mass_cent += 10.0 # Make glycine artificially heavier\n masses.append(mass_N)\n masses.append(mass_CA)\n masses.append(mass_C)\n masses.append(mass_cent)\n masses = torch.tensor(masses, device=device)\n\n # Different angle potentials for each residue\n inters_ang = torch.tensor([aas.index(r) for r in seq], dtype=torch.long, device=device)\n\n # Different dihedral potentials for each residue and predicted secondary structure type\n inters_dih = 
torch.tensor([aas.index(r) * len(ss_types) + ss_types.index(s) for r, s in zip(seq, ss_pred)],\n dtype=torch.long, device=device)\n\n return native_coords, inters_flat, inters_ang, inters_dih, masses, seq\n\n# Read an input data file and thread a new sequence onto it\ndef read_input_file_threaded(fp, seq, device=\"cpu\"):\n coords, inters_flat, inters_ang, inters_dih, masses, seq = read_input_file(fp, seq, device=device)\n\n # Move centroids out to minimum distances for that sequence\n ind_ca, ind_cent = atoms.index(\"CA\"), atoms.index(\"cent\")\n for i, r in enumerate(seq):\n ca_cent_diff = coords[i * len(atoms) + ind_cent] - coords[i * len(atoms) + ind_ca]\n ca_cent_unitvec = ca_cent_diff / ca_cent_diff.norm()\n coords[i * len(atoms) + ind_cent] = coords[i * len(atoms) + ind_ca] + centroid_dists[r] * ca_cent_unitvec\n\n return coords, inters_flat, inters_ang, inters_dih, masses, seq\n\n# Read a dataset of input files\nclass ProteinDataset(Dataset):\n def __init__(self, pdbids, coord_dir, device=\"cpu\"):\n self.pdbids = pdbids\n self.coord_dir = coord_dir\n self.set_size = len(pdbids)\n self.device = device\n\n def __len__(self):\n return self.set_size\n\n def __getitem__(self, index):\n fp = os.path.join(self.coord_dir, self.pdbids[index] + \".txt\")\n return read_input_file(fp, device=self.device)\n\n# Differentiable molecular simulation of proteins with a coarse-grained potential\nclass Simulator(torch.nn.Module):\n def __init__(self, ff_distances, ff_angles, ff_dihedrals):\n super(Simulator, self).__init__()\n self.ff_distances = torch.nn.Parameter(ff_distances)\n self.ff_angles = torch.nn.Parameter(ff_angles)\n self.ff_dihedrals = torch.nn.Parameter(ff_dihedrals)\n\n def forward(self,\n coords,\n inters_flat,\n inters_ang,\n inters_dih,\n masses,\n seq,\n native_coords,\n n_steps,\n integrator=\"vel\", # vel/no_vel/min/langevin/langevin_simple\n timestep=0.02,\n start_temperature=0.1,\n thermostat_const=0.0, # Set to 0.0 to run without a thermostat (NVE ensemble)\n temperature=0.0, # The effective temperature of the thermostat\n sim_filepath=None, # Output PDB file to write to or None to not write out\n energy=False, # Return the energy at the end of the simulation\n report_n=10_000, # Print and write PDB every report_n steps\n verbosity=2, # 0 for epoch info, 1 for protein info, 2 for simulation step info\n ):\n\n assert integrator in (\"vel\", \"no_vel\", \"min\", \"langevin\", \"langevin_simple\"), f\"Invalid integrator {integrator}\"\n device = coords.device\n batch_size, n_atoms = masses.size(0), masses.size(1)\n n_res = n_atoms // len(atoms)\n dist_bin_centres_tensor = torch.tensor(dist_bin_centres, device=device)\n pair_centres_flat = dist_bin_centres_tensor.index_select(0, inters_flat[0]).unsqueeze(0).expand(batch_size, -1, -1)\n pair_pots_flat = self.ff_distances.index_select(0, inters_flat[0]).unsqueeze(0).expand(batch_size, -1, -1)\n angle_bin_centres_tensor = torch.tensor(angle_bin_centres, device=device)\n angle_centres_flat = angle_bin_centres_tensor.unsqueeze(0).unsqueeze(0).expand(batch_size, n_res, -1)\n angle_pots_flat = self.ff_angles.index_select(1, inters_ang[0]).unsqueeze(0).expand(batch_size, -1, -1, -1)\n dih_bin_centres_tensor = torch.tensor(dih_bin_centres, device=device)\n dih_centres_flat = dih_bin_centres_tensor.unsqueeze(0).unsqueeze(0).expand(batch_size, n_res - 1, -1)\n dih_pots_flat = self.ff_dihedrals.index_select(1, inters_dih[0]).unsqueeze(0).expand(batch_size, -1, -1, -1)\n native_coords_ca = native_coords.view(batch_size, n_res, 3 * 
len(atoms))[0, :, 3:6]\n model_n = 0\n\n if integrator == \"vel\" or integrator == \"langevin\" or integrator == \"langevin_simple\":\n vels = torch.randn(coords.shape, device=device) * start_temperature\n accs_last = torch.zeros(coords.shape, device=device)\n elif integrator == \"no_vel\":\n coords_last = coords.clone() + torch.randn(coords.shape, device=device) * start_temperature * timestep\n\n # The step the energy is return on is not used for simulation so we add an extra step\n if energy:\n n_steps += 1\n\n for i in range(n_steps):\n if integrator == \"vel\":\n coords = coords + vels * timestep + 0.5 * accs_last * timestep * timestep\n elif integrator == \"langevin\":\n # From Gronbech-Jensen 2013\n alpha, twokbT = thermostat_const, temperature\n beta = np.sqrt(twokbT * alpha * timestep) * torch.randn(vels.shape, device=device)\n b = 1.0 / (1.0 + (alpha * timestep) / (2 * masses.unsqueeze(2)))\n coords_last = coords\n coords = coords + b * timestep * vels + 0.5 * b * (timestep ** 2) * accs_last + 0.5 * b * timestep * beta / masses.unsqueeze(2)\n elif integrator == \"langevin_simple\":\n coords = coords + vels * timestep + 0.5 * accs_last * timestep * timestep\n\n # See https://arxiv.org/pdf/1401.1181.pdf for derivation of forces.py\n printing = verbosity >= 2 and i % report_n == 0\n returning_energy = energy and i == n_steps - 1\n if printing or returning_energy:\n dist_energy = torch.zeros(1, device=device)\n angle_energy = torch.zeros(1, device=device)\n dih_energy = torch.zeros(1, device=device)\n\n # Add pairwise distance forces.py\n crep = coords.unsqueeze(1).expand(-1, n_atoms, -1, -1)\n diffs = crep - crep.transpose(1, 2)\n dists = diffs.norm(dim=3)\n dists_flat = dists.view(batch_size, n_atoms * n_atoms)\n dists_from_centres = pair_centres_flat - dists_flat.unsqueeze(2).expand(-1, -1, n_bins_force)\n dist_bin_inds = dists_from_centres.abs().argmin(dim=2).unsqueeze(2)\n # Force is gradient of potential\n # So it is proportional to difference of previous and next value of potential\n pair_forces_flat = 0.5 * (pair_pots_flat.gather(2, dist_bin_inds) - pair_pots_flat.gather(2, dist_bin_inds + 2))\n # Specify minimum to prevent division by zero errors\n norm_diffs = diffs / dists.clamp(min=0.01).unsqueeze(3)\n pair_accs = (pair_forces_flat.view(batch_size, n_atoms, n_atoms)).unsqueeze(3) * norm_diffs\n accs = pair_accs.sum(dim=1) / masses.unsqueeze(2)\n if printing or returning_energy:\n dist_energy += 0.5 * pair_pots_flat.gather(2, dist_bin_inds + 1).sum()\n\n atom_coords = coords.view(batch_size, n_res, 3 * len(atoms))\n atom_accs = torch.zeros(batch_size, n_res, 3 * len(atoms), device=device)\n # Angle forces.py\n # across_res is the number of atoms in the next residue, starting from atom_3\n for ai, (atom_1, atom_2, atom_3, across_res) in enumerate(angles):\n ai_1, ai_2, ai_3 = atoms.index(atom_1), atoms.index(atom_2), atoms.index(atom_3)\n if across_res == 0:\n ba = atom_coords[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n bc = atom_coords[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, : , (ai_2 * 3):(ai_2 * 3 + 3)]\n # Use residue potential according to central atom\n angle_pots_to_use = angle_pots_flat[:, ai, :]\n elif across_res == 1:\n ba = atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n bc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n angle_pots_to_use = angle_pots_flat[:, ai, :-1]\n elif across_res == 2:\n ba = atom_coords[:, 
:-1, (ai_1 * 3):(ai_1 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n bc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n angle_pots_to_use = angle_pots_flat[:, ai, 1:]\n ba_norms = ba.norm(dim=2)\n bc_norms = bc.norm(dim=2)\n angs = torch.acos((ba * bc).sum(dim=2) / (ba_norms * bc_norms))\n n_angles = n_res if across_res == 0 else n_res - 1\n angles_from_centres = angle_centres_flat[:, :n_angles] - angs.unsqueeze(2)\n angle_bin_inds = angles_from_centres.abs().argmin(dim=2).unsqueeze(2)\n angle_forces = 0.5 * (angle_pots_to_use.gather(2, angle_bin_inds) - angle_pots_to_use.gather(2, angle_bin_inds + 2))\n cross_ba_bc = torch.cross(ba, bc, dim=2)\n fa = angle_forces * normalize(torch.cross( ba, cross_ba_bc, dim=2), dim=2) / ba_norms.unsqueeze(2)\n fc = angle_forces * normalize(torch.cross(-bc, cross_ba_bc, dim=2), dim=2) / bc_norms.unsqueeze(2)\n fb = -fa -fc\n if across_res == 0:\n atom_accs[:, : , (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, : , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, : , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n elif across_res == 1:\n atom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n elif across_res == 2:\n atom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n if printing or returning_energy:\n angle_energy += angle_pots_to_use.gather(2, angle_bin_inds + 1).sum()\n\n # Dihedral forces.py\n # across_res is the number of atoms in the next residue, starting from atom_4\n for di, (atom_1, atom_2, atom_3, atom_4, across_res) in enumerate(dihedrals):\n ai_1, ai_2, ai_3, ai_4 = atoms.index(atom_1), atoms.index(atom_2), atoms.index(atom_3), atoms.index(atom_4)\n if across_res == 1:\n ab = atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] - atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)]\n bc = atom_coords[:, :-1, (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n cd = atom_coords[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] - atom_coords[:, :-1, (ai_3 * 3):(ai_3 * 3 + 3)]\n # Use residue potential according to central atom\n dih_pots_to_use = dih_pots_flat[:, di, :-1]\n elif across_res == 2:\n ab = atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] - atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)]\n bc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)]\n cd = atom_coords[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] - atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)]\n dih_pots_to_use = dih_pots_flat[:, di, 1:]\n elif across_res == 3:\n ab = atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)] - atom_coords[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)]\n bc = atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] - atom_coords[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)]\n cd = atom_coords[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] - atom_coords[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)]\n dih_pots_to_use = dih_pots_flat[:, di, 1:]\n cross_ab_bc = torch.cross(ab, bc, dim=2)\n cross_bc_cd = torch.cross(bc, cd, dim=2)\n bc_norms = bc.norm(dim=2).unsqueeze(2)\n dihs = torch.atan2(\n torch.sum(torch.cross(cross_ab_bc, cross_bc_cd, dim=2) * bc / bc_norms, dim=2),\n torch.sum(cross_ab_bc * cross_bc_cd, dim=2)\n )\n dihs_from_centres = dih_centres_flat - dihs.unsqueeze(2)\n dih_bin_inds = dihs_from_centres.abs().argmin(dim=2).unsqueeze(2)\n dih_forces = 0.5 * (dih_pots_to_use.gather(2, dih_bin_inds) - 
dih_pots_to_use.gather(2, dih_bin_inds + 2))\n fa = dih_forces * normalize(-cross_ab_bc, dim=2) / ab.norm(dim=2).unsqueeze(2)\n fd = dih_forces * normalize( cross_bc_cd, dim=2) / cd.norm(dim=2).unsqueeze(2)\n # Forces on the middle atoms have to keep the sum of torques null\n # Forces taken from http://www.softberry.com/freedownloadhelp/moldyn/description.html\n fb = ((ab * -bc) / (bc_norms ** 2) - 1) * fa - ((cd * -bc) / (bc_norms ** 2)) * fd\n fc = -fa - fb - fd\n if across_res == 1:\n atom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, :-1, (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n atom_accs[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] += fd\n elif across_res == 2:\n atom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, :-1, (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n atom_accs[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] += fd\n elif across_res == 3:\n atom_accs[:, :-1, (ai_1 * 3):(ai_1 * 3 + 3)] += fa\n atom_accs[:, 1: , (ai_2 * 3):(ai_2 * 3 + 3)] += fb\n atom_accs[:, 1: , (ai_3 * 3):(ai_3 * 3 + 3)] += fc\n atom_accs[:, 1: , (ai_4 * 3):(ai_4 * 3 + 3)] += fd\n if printing or returning_energy:\n dih_energy += dih_pots_to_use.gather(2, dih_bin_inds + 1).sum()\n\n accs += atom_accs.view(batch_size, n_atoms, 3) / masses.unsqueeze(2)\n\n # Shortcut to return energy at a given step\n if returning_energy:\n return dist_energy + angle_energy + dih_energy\n\n if integrator == \"vel\":\n vels = vels + 0.5 * (accs_last + accs) * timestep\n accs_last = accs\n elif integrator == \"no_vel\":\n coords_next = 2 * coords - coords_last + accs * timestep * timestep\n coords_last = coords\n coords = coords_next\n elif integrator == \"langevin\":\n # From Gronbech-Jensen 2013\n vels = vels + 0.5 * timestep * (accs_last + accs) - alpha * (coords - coords_last) / masses.unsqueeze(2) + beta / masses.unsqueeze(2)\n accs_last = accs\n elif integrator == \"langevin_simple\":\n gamma, twokbT = thermostat_const, temperature\n accs = accs + (-gamma * vels + np.sqrt(gamma * twokbT) * torch.randn(vels.shape, device=device)) / masses.unsqueeze(2)\n vels = vels + 0.5 * (accs_last + accs) * timestep\n accs_last = accs\n elif integrator == \"min\":\n coords = coords + accs * 0.1\n\n # Apply thermostat\n if integrator in (\"vel\", \"no_vel\") and thermostat_const > 0.0:\n thermostat_prob = timestep / thermostat_const\n for ai in range(n_atoms):\n if random() < thermostat_prob:\n if integrator == \"vel\":\n # Actually this should be divided by the mass\n new_vel = torch.randn(3, device=device) * temperature\n vels[0, ai] = new_vel\n elif integrator == \"no_vel\":\n new_diff = torch.randn(3, device=device) * temperature * timestep\n coords_last[0, ai] = coords[0, ai] - new_diff\n\n if printing:\n total_energy = dist_energy + angle_energy + dih_energy\n out_line = \" Step {:8} / {} - acc {:6.3f} {}- energy {:6.2f} ( {:6.2f} {:6.2f} {:6.2f} ) - Cα RMSD {:6.2f}\".format(\n i + 1, n_steps, torch.mean(accs.norm(dim=2)).item(),\n \"- vel {:6.3f} \".format(torch.mean(vels.norm(dim=2)).item()) if integrator in (\"vel\", \"langevin\", \"langevin_simple\") else \"\",\n total_energy.item(), dist_energy.item(), angle_energy.item(), dih_energy.item(),\n rmsd(coords.view(batch_size, n_res, 3 * len(atoms))[0, :, 3:6], native_coords_ca)[0].item())\n report(out_line, 2, verbosity)\n\n if sim_filepath and i % report_n == 0:\n model_n += 1\n with open(sim_filepath, \"a\") as of:\n of.write(\"MODEL {:>8}\\n\".format(model_n))\n for ri, r in 
enumerate(seq):\n for ai, atom in enumerate(atoms):\n of.write(\"ATOM {:>4} {:<2} {:3} A{:>4} {:>8.3f}{:>8.3f}{:>8.3f} 1.00 0.00 {:>2} \\n\".format(\n len(atoms) * ri + ai + 1, atom[:2].upper(),\n one_to_three_aas[r], ri + 1,\n coords[0, len(atoms) * ri + ai, 0].item(),\n coords[0, len(atoms) * ri + ai, 1].item(),\n coords[0, len(atoms) * ri + ai, 2].item(),\n atom[0].upper()))\n of.write(\"ENDMDL\\n\")\n\n return coords\n\n# RMSD between two sets of coordinates with shape (n_atoms, 3) using the Kabsch algorithm\n# Returns the RMSD and whether convergence was reached\ndef rmsd(c1, c2):\n device = c1.device\n r1 = c1.transpose(0, 1)\n r2 = c2.transpose(0, 1)\n P = r1 - r1.mean(1).view(3, 1)\n Q = r2 - r2.mean(1).view(3, 1)\n cov = torch.matmul(P, Q.transpose(0, 1))\n try:\n U, S, V = torch.svd(cov)\n except RuntimeError:\n report(\" SVD failed to converge\", 0)\n return torch.tensor([20.0], device=device), False\n d = torch.tensor([\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, torch.det(torch.matmul(V, U.transpose(0, 1)))]\n ], device=device)\n rot = torch.matmul(torch.matmul(V, d), U.transpose(0, 1))\n rot_P = torch.matmul(rot, P)\n diffs = rot_P - Q\n msd = (diffs ** 2).sum() / diffs.size(1)\n return msd.sqrt(), True\n\n# Generate starting coordinates\n# conformation is extended/predss/random/helix\ndef starting_coords(seq, conformation=\"extended\", input_file=\"\", device=\"cpu\"):\n import PeptideBuilder\n\n coords = torch.zeros(len(seq) * len(atoms), 3, device=device)\n backbone_atoms = (\"N\", \"CA\", \"C\", \"O\")\n ss_phis = {\"C\": -120.0, \"H\": -60.0, \"E\": -120.0}\n ss_psis = {\"C\": 140.0, \"H\": -60.0, \"E\": 140.0}\n\n if conformation == \"predss\":\n with open(input_file) as f:\n ss_pred = f.readlines()[1].rstrip()\n for i, r in enumerate(seq):\n r_to_use = \"A\" if r == \"G\" else r\n if i == 0:\n structure = PeptideBuilder.initialize_res(r_to_use)\n elif conformation == \"predss\":\n structure = PeptideBuilder.add_residue(structure, r_to_use, ss_phis[ss_pred[i]], ss_psis[ss_pred[i]])\n elif conformation == \"random\":\n # ϕ can be -180° -> -30°, ψ can be anything\n phi = -180 + random() * 150\n psi = -180 + random() * 360\n structure = PeptideBuilder.add_residue(structure, r_to_use, phi, psi)\n elif conformation == \"helix\":\n structure = PeptideBuilder.add_residue(structure, r_to_use, ss_phis[\"H\"], ss_psis[\"H\"])\n elif conformation == \"extended\":\n coil_level = 30.0\n phi = -120.0 + gauss(0.0, coil_level)\n psi = 140.0 + gauss(0.0, coil_level)\n structure = PeptideBuilder.add_residue(structure, r_to_use, phi, psi)\n else:\n raise(AssertionError(f\"Invalid conformation {conformation}\"))\n for ai, atom in enumerate(atoms):\n if atom == \"cent\":\n coords[len(atoms) * i + ai] = torch.tensor(\n [at.coord for at in structure[0][\"A\"][i + 1] if at.name not in backbone_atoms],\n dtype=torch.float, device=device).mean(dim=0)\n else:\n coords[len(atoms) * i + ai] = torch.tensor(structure[0][\"A\"][i + 1][atom].coord,\n dtype=torch.float, device=device)\n return coords\n\n# Print a protein data file from a PDB/mmCIF file and an optional PSIPRED ss2 file\ndef print_input_file(structure_file, ss2_file=None):\n extension = os.path.basename(structure_file).rsplit(\".\", 1)[-1].lower()\n if extension in (\"cif\", \"mmcif\"):\n from Bio.PDB import MMCIFParser\n parser = MMCIFParser()\n else:\n from Bio.PDB import PDBParser\n parser = PDBParser()\n struc = parser.get_structure(\"\", structure_file)\n\n seq = \"\"\n coords = []\n for chain in struc[0]:\n for res in 
chain:\n # Skip hetero and water residues\n if res.id[0] != \" \":\n continue\n seq += three_to_one_aas[res.get_resname()]\n if res.get_resname() == \"GLY\":\n # Extend vector of length 1 Å from Cα to act as fake centroid\n d = res[\"CA\"].get_coord() - res[\"C\"].get_coord() + res[\"CA\"].get_coord() - res[\"N\"].get_coord()\n coord_cent = res[\"CA\"].get_coord() + d / np.linalg.norm(d)\n else:\n # Centroid coordinates of sidechain heavy atoms\n atom_coords = []\n for atom in res:\n if atom.get_name() not in (\"N\", \"CA\", \"C\", \"O\") and atom.element != \"H\":\n atom_coords.append(atom.get_coord())\n coord_cent = np.array(atom_coords).mean(0)\n coords.append([res[\"N\"].get_coord(), res[\"CA\"].get_coord(), res[\"C\"].get_coord(), coord_cent])\n\n print(seq)\n if ss2_file:\n # Extract 3-state secondary structure prediction from PSIPRED ss2 output file\n ss_pred = \"\"\n with open(ss2_file) as f:\n for line in f:\n if len(line.rstrip()) > 0 and not line.startswith(\"#\"):\n ss_pred += line.split()[2]\n assert len(seq) == len(ss_pred), f\"Sequence length is {len(seq)} but SS prediction length is {len(ss_pred)}\"\n print(ss_pred)\n else:\n print(\"C\" * len(seq))\n\n def coord_str(coord):\n return \" \".join([str(round(c, 3)) for c in coord])\n\n for coord_n, coord_ca, coord_c, coord_cent in coords:\n print(f\"{coord_str(coord_n)} {coord_str(coord_ca)} {coord_str(coord_c)} {coord_str(coord_cent)}\")\n\ndef fixed_backbone_design(input_file, simulator, n_mutations=2_000, n_min_steps=100,\n print_color=True, device=\"cpu\", verbosity=0):\n if print_color:\n from colorama import Fore, Style\n highlight_open = Fore.RED\n highlight_close = Style.RESET_ALL\n else:\n highlight_open = \"\"\n highlight_close = \"\"\n\n coords, inters_flat, inters_ang, inters_dih, masses, native_seq = read_input_file(input_file, device=device)\n energy_native_min = simulator(coords.unsqueeze(0), inters_flat.unsqueeze(0),\n inters_ang.unsqueeze(0), inters_dih.unsqueeze(0),\n masses.unsqueeze(0), native_seq, coords.unsqueeze(0),\n n_min_steps, integrator=\"min\", energy=True,\n verbosity=verbosity).item()\n print(f\"Native score is {energy_native_min:6.1f}\")\n\n aa_weights = [pdb_aa_frequencies[aa] for aa in aas]\n seq = \"\".join(choices(aas, weights=aa_weights, k=len(native_seq)))\n coords, inters_flat, inters_ang, inters_dih, masses, seq = read_input_file_threaded(\n input_file, seq, device=device)\n energy_min = simulator(coords.unsqueeze(0), inters_flat.unsqueeze(0),\n inters_ang.unsqueeze(0), inters_dih.unsqueeze(0),\n masses.unsqueeze(0), seq, coords.unsqueeze(0),\n n_min_steps, integrator=\"min\", energy=True, verbosity=verbosity).item()\n\n for mi in range(n_mutations):\n mutate_i = randrange(len(seq))\n rand_aa = choices(aas, weights=aa_weights, k=1)[0]\n # Ensure we don't randomly choose the same residue\n while rand_aa == seq[mutate_i]:\n rand_aa = choices(aas, weights=aa_weights, k=1)[0]\n new_seq = seq[:mutate_i] + rand_aa + seq[(mutate_i + 1):]\n coords, inters_flat, inters_ang, inters_dih, masses, new_seq = read_input_file_threaded(\n input_file, new_seq, device=device)\n new_energy_min = simulator(coords.unsqueeze(0), inters_flat.unsqueeze(0),\n inters_ang.unsqueeze(0), inters_dih.unsqueeze(0),\n masses.unsqueeze(0), new_seq, coords.unsqueeze(0),\n n_min_steps, integrator=\"min\", energy=True,\n verbosity=verbosity).item()\n\n if new_energy_min < energy_min:\n decision = \"accept_lower\"\n elif new_energy_min - energy_min < 10.0 and random() < -0.25 + 0.5 * (n_mutations - mi) / n_mutations:\n 
decision = \"accept_chance\"\n else:\n decision = \"reject\"\n print(\"{:5} / {:5} | {:6.1f} | {:13} | {:5.3f} | {}\".format(mi + 1, n_mutations, new_energy_min,\n decision, sum(1 for r1, r2 in zip(new_seq, native_seq) if r1 == r2) / len(native_seq),\n \"\".join([f\"{highlight_open}{r1}{highlight_close}\" if r1 == r2 else r1 for r1, r2 in zip(new_seq, native_seq)])))\n if decision.startswith(\"accept\"):\n seq = new_seq\n energy_min = new_energy_min\n\n print(\" final | {:6.1f} | {:13} | {:5.3f} | {}\".format(energy_min,\n \"-\", sum(1 for r1, r2 in zip(seq, native_seq) if r1 == r2) / len(native_seq),\n \"\".join([f\"{highlight_open}{r1}{highlight_close}\" if r1 == r2 else r1 for r1, r2 in zip(seq, native_seq)])))\n\ndef train(model_filepath, device=\"cpu\", verbosity=0):\n max_n_steps = 2_000\n learning_rate = 1e-4\n n_accumulate = 100\n\n simulator = Simulator(\n torch.zeros(len(interactions), n_bins_pot, device=device),\n torch.zeros(len(angles), n_aas, n_bins_pot, device=device),\n torch.zeros(len(dihedrals), n_aas * len(ss_types), n_bins_pot + 2, device=device)\n )\n\n train_set = ProteinDataset(train_proteins, train_val_dir, device=device)\n val_set = ProteinDataset(val_proteins , train_val_dir, device=device)\n\n optimizer = torch.optim.Adam(simulator.parameters(), lr=learning_rate)\n\n report(\"Starting training\", 0, verbosity)\n for ei in count(start=0, step=1):\n # After 37 epochs reset the optimiser with a lower learning rate\n if ei == 37:\n optimizer = torch.optim.Adam(simulator.parameters(), lr=learning_rate / 2)\n\n train_rmsds, val_rmsds = [], []\n n_steps = min(250 * ((ei // 5) + 1), max_n_steps) # Scale up n_steps over epochs\n train_inds = list(range(len(train_set)))\n val_inds = list(range(len(val_set)))\n shuffle(train_inds)\n shuffle(val_inds)\n simulator.train()\n optimizer.zero_grad()\n for i, ni in enumerate(train_inds):\n native_coords, inters_flat, inters_ang, inters_dih, masses, seq = train_set[ni]\n coords = simulator(native_coords.unsqueeze(0), inters_flat.unsqueeze(0),\n inters_ang.unsqueeze(0), inters_dih.unsqueeze(0), masses.unsqueeze(0),\n seq, native_coords.unsqueeze(0), n_steps, verbosity=verbosity)\n loss, passed = rmsd(coords[0], native_coords)\n train_rmsds.append(loss.item())\n if passed:\n loss_log = torch.log(1.0 + loss)\n loss_log.backward()\n report(\" Training {:4} / {:4} - RMSD {:6.2f} over {:4} steps and {:3} residues\".format(\n i + 1, len(train_set), loss.item(), n_steps, len(seq)), 1, verbosity)\n if (i + 1) % n_accumulate == 0:\n optimizer.step()\n optimizer.zero_grad()\n simulator.eval()\n with torch.no_grad():\n for i, ni in enumerate(val_inds):\n native_coords, inters_flat, inters_ang, inters_dih, masses, seq = val_set[ni]\n coords = simulator(native_coords.unsqueeze(0), inters_flat.unsqueeze(0),\n inters_ang.unsqueeze(0), inters_dih.unsqueeze(0), masses.unsqueeze(0),\n seq, native_coords.unsqueeze(0), n_steps, verbosity=verbosity)\n loss, passed = rmsd(coords[0], native_coords)\n val_rmsds.append(loss.item())\n report(\" Validation {:4} / {:4} - RMSD {:6.2f} over {:4} steps and {:3} residues\".format(\n i + 1, len(val_set), loss.item(), n_steps, len(seq)), 1, verbosity)\n torch.save({\"distances\": simulator.ff_distances.data,\n \"angles\" : simulator.ff_angles.data,\n \"dihedrals\": simulator.ff_dihedrals.data,\n \"optimizer\": optimizer.state_dict()},\n model_filepath)\n report(\"Epoch {:4} - med train/val RMSD {:6.3f} / {:6.3f} over {:4} steps\".format(\n ei + 1, np.median(train_rmsds), np.median(val_rmsds), n_steps), 0, 
verbosity)\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.svd",
"torch.nn.Parameter",
"torch.ones",
"numpy.sqrt",
"torch.zeros",
"torch.randn",
"numpy.median",
"torch.sum",
"numpy.linalg.norm",
"torch.tensor",
"torch.matmul",
"torch.no_grad",
"torch.log",
"numpy.array",
"numpy.loadtxt",
"torch.cross"
]
] |
marciojustino/data-arrays-lib
|
[
"d2495eb9d00d5ee3a885d6f215d9c28eba9fab66"
] |
[
"main.py"
] |
[
"import random\nimport numpy as np\nfrom src.data_arrays import DataArrays\nimport datetime\n\na = np.random.randint(1, 10000, 1000000)\n\ndataArrays = DataArrays()\nstart_at = datetime.datetime.now()\nb = dataArrays.sort(a)\nends_at = datetime.datetime.now()\nprint(\"Duration: {}\".format(ends_at-start_at))\n"
] |
[
[
"numpy.random.randint"
]
] |
jurasofish/pandapower
|
[
"630e3278ca012535f78282ae73f1b86f3fe932fc"
] |
[
"pandapower/test/opf/test_basic.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport numpy as np\nimport pytest\n\nimport pandapower as pp\nfrom pandapower.convert_format import convert_format\nfrom pandapower.networks import simple_four_bus_system\nfrom pandapower.test.toolbox import add_grid_connection\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef simplest_grid():\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15,\n max_q_mvar=0.005, min_q_mvar=-0.005)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_mw=0.02, controllable=False)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100)\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=0.1)\n\n return net\n\n\[email protected]\ndef simple_opf_test_net():\n net = pp.create_empty_network()\n pp.create_bus(net, vn_kv=10.)\n pp.create_bus(net, vn_kv=.4)\n pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,\n min_q_mvar=-.005)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_mw=0.020, controllable=False)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100)\n return net\n\n\ndef test_convert_format():\n \"\"\" Testing a very simple network without transformer for voltage\n constraints with OPF \"\"\"\n\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15,\n max_q_mvar=0.05, min_q_mvar=-0.005)\n net.gen[\"cost_per_mw\"] = 100\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_mw=0.02, controllable=False)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n # run OPF\n convert_format(net)\n\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_simplest_voltage():\n \"\"\" Testing a very simple network without transformer for voltage\n constraints with OPF \"\"\"\n\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n net = simplest_grid()\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: 
\\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n pp.runopp(net, check_connectivity=True)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\n# def test_eg_voltage():\n# \"\"\" Testing a very simple network without transformer for voltage\n# constraints with OPF \"\"\"\n#\n# # boundaries:\n# vm_max = 1.05\n# vm_min = 0.95\n#\n# # create net\n# net = pp.create_empty_network()\n# pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n# pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n# pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.150, max_q_mvar=0.05,\n# min_q_mvar=-0.05)\n# pp.create_ext_grid(net, 0, vm_pu=1.01)\n# pp.create_load(net, 1, p_mw=0.02, controllable=False)\n# pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n# c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n# max_loading_percent=100)\n# # run OPF\n# for init in [\"pf\", \"flat\"]:\n# pp.runopp(net, init=init)\n# assert net[\"OPF_converged\"]\n#\n# # check and assert result\n# logger.debug(\"test_simplest_voltage\")\n# logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n# logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n# logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n# assert net.res_bus.vm_pu.at[0] == net.ext_grid.vm_pu.values\n\n\ndef test_simplest_dispatch():\n \"\"\" Testing a very simple network without transformer for voltage\n constraints with OPF \"\"\"\n\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.150, max_q_mvar=0.05,\n min_q_mvar=-0.05)\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=100)\n pp.create_ext_grid(net, 0)\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=101)\n pp.create_load(net, 1, p_mw=0.02, controllable=False)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, cost_function=\"linear\", init=init)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_est_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_opf_gen_voltage():\n \"\"\" Testing a simple network with transformer for voltage\n constraints with OPF using a generator \"\"\"\n\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # ceate net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n 
pp.create_transformer_from_parameters(net, 0, 1, vk_percent=3.75,\n tap_max=2, vn_lv_kv=0.4,\n shift_degree=150, tap_neutral=0,\n vn_hv_kv=10.0, vkr_percent=2.8125,\n tap_pos=0, tap_side=\"hv\", tap_min=-2,\n tap_step_percent=2.5, i0_percent=0.68751,\n sn_mva=0.016, pfe_kw=0.11, name=None,\n in_service=True, index=None, max_loading_percent=200)\n pp.create_gen(net, 3, p_mw=0.01, controllable=True, min_p_mw=0, max_p_mw=0.025, max_q_mvar=0.5,\n min_q_mvar=-0.5)\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=10)\n pp.create_ext_grid(net, 0)\n pp.create_line_from_parameters(net, 1, 2, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100000)\n pp.create_line_from_parameters(net, 2, 3, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100000)\n\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init, calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_opf_gen_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_opf_sgen_voltage():\n \"\"\" Testing a simple network with transformer for voltage\n constraints with OPF using a static generator \"\"\"\n\n # boundaries\n vm_max = 1.04\n vm_min = 0.96\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_transformer_from_parameters(net, 0, 1, vk_percent=3.75,\n tap_max=2, vn_lv_kv=0.4,\n shift_degree=150, tap_neutral=0,\n vn_hv_kv=10.0, vkr_percent=2.8125,\n tap_pos=0, tap_side=\"hv\", tap_min=-2,\n tap_step_percent=2.5, i0_percent=0.68751,\n sn_mva=0.016, pfe_kw=0.11, name=None,\n in_service=True, index=None, max_loading_percent=1000000)\n pp.create_sgen(net, 3, p_mw=0.01, controllable=True, min_p_mw=-0.005, max_p_mw=0.015,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=0.1)\n pp.create_ext_grid(net, 0)\n pp.create_line_from_parameters(net, 1, 2, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=1000000)\n pp.create_line_from_parameters(net, 2, 3, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=1000000)\n\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init, calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n # assert and check result\n logger.debug(\"test_opf_sgen_voltage\")\n logger.debug(\"res_sgen:\\n%s\" % net.res_sgen)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_opf_gen_loading():\n \"\"\" Testing a simple network with transformer for loading\n constraints with OPF using a generator \"\"\"\n\n # wide open voltage boundaries to make sure they don't interfere with loading constraints\n vm_max = 1.5\n vm_min = 0.5\n max_line_loading = 11\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, 
vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_transformer_from_parameters(net, 0, 1, vk_percent=3.75,\n tap_max=2, vn_lv_kv=0.4,\n shift_degree=150, tap_neutral=0,\n vn_hv_kv=10.0, vkr_percent=2.8125,\n tap_pos=0, tap_side=\"hv\", tap_min=-2,\n tap_step_percent=2.5, i0_percent=0.68751,\n sn_mva=0.016, pfe_kw=0.11, name=None,\n in_service=True, index=None, max_loading_percent=145)\n pp.create_gen(net, 3, p_mw=0.01, controllable=True, min_p_mw=0.005, max_p_mw=0.015,\n max_q_mvar=0.05, min_q_mvar=-0.05)\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=-10)\n pp.create_ext_grid(net, 0)\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=.1)\n pp.create_line_from_parameters(net, 1, 2, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=max_line_loading)\n pp.create_line_from_parameters(net, 2, 3, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=max_line_loading)\n\n # run OPF\n\n pp.runopp(net, OPF_VIOLATION=1e-1, OUT_LIM_LINE=2,\n PDIPM_GRADTOL=1e-10, PDIPM_COMPTOL=1e-10, PDIPM_COSTTOL=1e-10, calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n # assert and check result\n logger.debug(\"test_opf_gen_loading\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_line.loading_percent:\\n%s\" % net.res_line.loading_percent)\n assert max(net.res_line.loading_percent) < max_line_loading\n logger.debug(\"res_trafo.loading_percent:\\n%s\" % net.res_trafo.loading_percent)\n assert max(net.res_trafo.loading_percent) < 145\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_opf_sgen_loading():\n \"\"\" Testing a simple network with transformer for loading\n constraints with OPF using a generator \"\"\"\n\n # boundaries\n vm_max = 1.5\n vm_min = 0.5\n max_trafo_loading = 800\n max_line_loading = 13\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_transformer_from_parameters(net, 0, 1, vk_percent=3.75, tap_max=2, vn_lv_kv=0.4,\n shift_degree=150, tap_neutral=0, vn_hv_kv=10.0,\n vkr_percent=2.8125, tap_pos=0, tap_side=\"hv\", tap_min=-2,\n tap_step_percent=2.5, i0_percent=0.68751, sn_mva=0.016,\n pfe_kw=0.11, name=None, in_service=True, index=None,\n max_loading_percent=max_trafo_loading)\n pp.create_sgen(net, 3, p_mw=0.01, controllable=True, min_p_mw=0.005, max_p_mw=.015,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=-10)\n pp.create_ext_grid(net, 0)\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=.1)\n pp.create_line_from_parameters(net, 1, 2, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=max_line_loading)\n pp.create_line_from_parameters(net, 2, 3, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=max_line_loading)\n\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init, 
calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n # assert and check result\n logger.debug(\"test_opf_sgen_loading\")\n logger.debug(\"res_sgen:\\n%s\" % net.res_sgen)\n logger.debug(\"res_line.loading_percent:\\n%s\" % net.res_line.loading_percent)\n assert max(net.res_line.loading_percent) - max_line_loading < 0.15\n logger.debug(\"res_trafo.loading_percent:\\n%s\" % net.res_trafo.loading_percent)\n assert max(net.res_trafo.loading_percent) < max_trafo_loading\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n # check connectivity check\n pp.runopp(net, check_connectivity=True, calculate_voltage_angles=False)\n\n\ndef test_unconstrained_line():\n \"\"\" Testing a very simple network without transformer for voltage\n constraints with OPF \"\"\"\n\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,\n min_q_mvar=-0.05)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_mw=0.02, controllable=False)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876)\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=1)\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init, calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert max(net.res_bus.vm_pu) < vm_max\n assert min(net.res_bus.vm_pu) > vm_min\n\n\ndef test_trafo3w_loading():\n net = pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net, vn_kv=110.)\n b3 = pp.create_bus(net, vn_kv=20.)\n b4 = pp.create_bus(net, vn_kv=10.)\n tidx = pp.create_transformer3w(net, b2, b3, b4, std_type='63/25/38 MVA 110/20/10 kV',\n max_loading_percent=120)\n pp.create_load(net, b3, p_mw=5, controllable=False)\n load_id = pp.create_load(net, b4, p_mw=5, controllable=True, max_p_mw=50, min_p_mw=0, min_q_mvar=-1e6,\n max_q_mvar=1e6)\n pp.create_poly_cost(net, load_id, \"load\", cp1_eur_per_mw=-1000)\n # pp.create_xward(net, b4, 1000, 1000, 1000, 1000, 0.1, 0.1, 1.0)\n net.trafo3w.shift_lv_degree.at[tidx] = 120\n net.trafo3w.shift_mv_degree.at[tidx] = 80\n\n # pp.runopp(net, calculate_voltage_angles = True) >> Doesn't converge\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, calculate_voltage_angles=False, init=init)\n assert net[\"OPF_converged\"]\n assert abs(net.res_trafo3w.loading_percent.values - 120) < 1e-3\n\n\ndef test_dcopf_poly(simple_opf_test_net):\n net = simple_opf_test_net\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=100)\n # run OPF\n pp.rundcopp(net)\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert abs(100 * net.res_gen.p_mw.values - net.res_cost) < 1e-3\n\n\ndef test_opf_poly(simple_opf_test_net):\n net = simple_opf_test_net\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=100)\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, 
init=init)\n assert net[\"OPF_converged\"]\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert abs(100 * net.res_gen.p_mw.values - net.res_cost) < 1e-3\n\n\ndef test_opf_pwl(simple_opf_test_net):\n # create net\n net = simple_opf_test_net\n pp.create_pwl_cost(net, 0, \"gen\", [[0, 100, 100], [100, 200, 100]])\n # run OPF\n for init in [\"pf\", \"flat\"]:\n pp.runopp(net, init=init)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert abs(100 * net.res_gen.p_mw.values - net.res_cost) < 1e-3\n\n\ndef test_dcopf_pwl(simple_opf_test_net):\n # create net\n net = simple_opf_test_net\n pp.create_pwl_cost(net, 0, \"gen\", [[0, 100, 100], [100, 200, 100]])\n pp.create_pwl_cost(net, 0, \"ext_grid\", [[0, 100, 0], [100, 200, 0]])\n # run OPF\n pp.rundcopp(net)\n assert net[\"OPF_converged\"]\n\n # check and assert result\n logger.debug(\"test_simplest_voltage\")\n logger.debug(\"res_gen:\\n%s\" % net.res_gen)\n logger.debug(\"res_ext_grid:\\n%s\" % net.res_ext_grid)\n logger.debug(\"res_bus.vm_pu: \\n%s\" % net.res_bus.vm_pu)\n assert abs(100 * net.res_gen.p_mw.values - net.res_cost) < 1e-3\n\n\ndef test_opf_varying_max_line_loading():\n \"\"\" Testing a simple network with transformer for loading\n constraints with OPF using a generator \"\"\"\n\n # boundaries\n vm_max = 1.5\n vm_min = 0.5\n max_trafo_loading = 800\n max_line_loading = 13\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_transformer_from_parameters(net, 0, 1, vk_percent=3.75, tap_max=2, vn_lv_kv=0.4,\n shift_degree=150, tap_neutral=0, vn_hv_kv=10.0,\n vkr_percent=2.8125, tap_pos=0, tap_side=\"hv\", tap_min=-2,\n tap_step_percent=2.5, i0_percent=0.68751, sn_mva=0.016,\n pfe_kw=0.11, name=None, in_service=True, index=None,\n max_loading_percent=max_trafo_loading)\n\n pp.create_sgen(net, 3, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.025,\n min_q_mvar=-0.025)\n pp.create_sgen(net, 2, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.025,\n min_q_mvar=-0.025)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=10)\n pp.create_poly_cost(net, 1, \"sgen\", cp1_eur_per_mw=10)\n pp.create_ext_grid(net, 0)\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=.1)\n pp.create_line_from_parameters(net, 1, 2, 1, name=\"line1\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.200, x_ohm_per_km=0.1159876,\n max_loading_percent=20)\n pp.create_line_from_parameters(net, 1, 3, 1, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.100, x_ohm_per_km=0.1159876,\n max_loading_percent=10)\n\n # run OPF\n pp.runopp(net, init=\"flat\", calculate_voltage_angles=False)\n assert net[\"OPF_converged\"]\n\n assert np.allclose(net[\"_ppc\"][\"branch\"][:, 5], np.array([0.02771281 + 0.j, 0.00692820 + 0.j, 0.12800000 + 0.j]))\n\n # assert and check result\n 
logger.debug(\"test_opf_sgen_loading\")\n logger.debug(\"res_sgen:\\n%s\" % net.res_sgen)\n logger.debug(\"res_line.loading_percent:\\n%s\" % net.res_line.loading_percent)\n assert net.res_line.loading_percent.at[0] - 20 < 1e-2\n logger.debug(\"res_line.loading_percent:\\n%s\" % net.res_line.loading_percent)\n assert net.res_line.loading_percent.at[1] - 10 < 1e-2\n\n\ndef test_storage_opf():\n \"\"\" Testing a simple network with storage to ensure the correct behaviour\n of the storage OPF-Functions \"\"\"\n\n # boundaries\n vm_max = 1.1\n vm_min = 0.9\n max_line_loading_percent = 100\n\n # create network\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n b2 = pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n\n pp.create_line(net, b1, b2, length_km=5, std_type=\"NAYY 4x50 SE\",\n max_loading_percent=max_line_loading_percent)\n\n # test elements static\n pp.create_ext_grid(net, b2)\n pp.create_load(net, b1, p_mw=0.0075, controllable=False)\n pp.create_sgen(net, b1, p_mw=0.025, controllable=True, min_p_mw=0.01, max_p_mw=0.025,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n\n # test elements\n pp.create_storage(net, b1, p_mw=-.0025, max_e_mwh=50, controllable=True, max_p_mw=0,\n min_p_mw=-0.025, max_q_mvar=0.025, min_q_mvar=-0.025)\n pp.create_sgen(net, b1, p_mw=0.025, controllable=True, min_p_mw=0, max_p_mw=0.025,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n pp.create_load(net, b1, p_mw=0.025, controllable=True, max_p_mw=0.025, min_p_mw=0,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n\n # costs\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=3)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=2)\n pp.create_poly_cost(net, 0, \"storage\", cp1_eur_per_mw=-1)\n pp.create_poly_cost(net, 1, \"sgen\", cp1_eur_per_mw=1)\n\n pp.create_poly_cost(net, 1, \"load\", cp1_eur_per_mw=-3)\n\n # test storage generator behaviour\n net[\"storage\"].in_service.iloc[0] = True\n net[\"storage\"].p_mw.iloc[0] = -0.025\n net[\"sgen\"].in_service.iloc[1] = False\n net[\"load\"].in_service.iloc[1] = False\n\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n res_stor_p_mw = net[\"res_storage\"].p_mw.iloc[0]\n res_stor_q_mvar = net[\"res_storage\"].q_mvar.iloc[0]\n res_cost_stor = net[\"res_cost\"]\n\n net[\"storage\"].in_service.iloc[0] = False\n net[\"storage\"].p_mw.iloc[0] = -0.025\n net[\"sgen\"].in_service.iloc[1] = True\n net[\"load\"].in_service.iloc[1] = False\n\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n res_sgen_p_mw = net[\"res_sgen\"].p_mw.iloc[1]\n res_sgen_q_mvar = net[\"res_sgen\"].q_mvar.iloc[1]\n res_cost_sgen = net[\"res_cost\"]\n\n # assert storage generator behaviour\n assert np.isclose(res_stor_p_mw, -res_sgen_p_mw)\n assert np.isclose(res_stor_q_mvar, -res_sgen_q_mvar)\n assert np.isclose(res_cost_stor, res_cost_sgen)\n\n # test storage load behaviour\n net[\"storage\"].in_service.iloc[0] = True\n net[\"storage\"].p_mw.iloc[0] = 0.025\n net[\"storage\"].max_p_mw.iloc[0] = 0.025\n net[\"storage\"].min_p_mw.iloc[0] = 0\n net[\"storage\"].max_q_mvar.iloc[0] = 0.025\n net[\"storage\"].min_q_mvar.iloc[0] = -0.025\n # gencost for storages: positive costs in pandapower per definition\n # --> storage gencosts are similar to sgen gencosts (make_objective.py, l.128ff. 
and l.185ff.)\n net[\"poly_cost\"].cp1_eur_per_mw.iloc[2] = net.poly_cost.cp1_eur_per_mw.iloc[4]\n net[\"sgen\"].in_service.iloc[1] = False\n net[\"load\"].in_service.iloc[1] = False\n\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n res_stor_p_mw = net[\"res_storage\"].p_mw.iloc[0]\n res_stor_q_mvar = net[\"res_storage\"].q_mvar.iloc[0]\n res_cost_stor = net[\"res_cost\"]\n\n net[\"storage\"].in_service.iloc[0] = False\n net[\"storage\"].p_mw.iloc[0] = 0.025\n net[\"sgen\"].in_service.iloc[1] = False\n net[\"load\"].in_service.iloc[1] = True\n\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n res_load_p_mw = net[\"res_load\"].p_mw.iloc[1]\n res_load_q_mvar = net[\"res_load\"].q_mvar.iloc[1]\n res_cost_load = net[\"res_cost\"]\n\n # assert storage load behaviour\n assert np.isclose(res_stor_p_mw, res_load_p_mw)\n assert np.isclose(res_stor_q_mvar, res_load_q_mvar)\n assert np.isclose(res_cost_stor, res_cost_load)\n\n\ndef test_in_service_controllables():\n \"\"\" Testing controllable but out of service elements behaviour \"\"\"\n # boundaries\n vm_max = 1.1\n vm_min = 0.9\n max_line_loading_percent = 100\n\n # create network\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n b2 = pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n\n pp.create_line(net, b1, b2, length_km=5, std_type=\"NAYY 4x50 SE\",\n max_loading_percent=max_line_loading_percent)\n\n # test elements static\n pp.create_ext_grid(net, b2)\n pp.create_load(net, b1, p_mw=7.5, controllable=True, max_p_mw=0.010, min_p_mw=0,\n max_q_mvar=2.5, min_q_mvar=-2.5)\n pp.create_sgen(net, b1, p_mw=0.025, controllable=True, min_p_mw=0.01, max_p_mw=0.025,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n\n # test elements\n pp.create_sgen(net, b1, p_mw=0.025, controllable=True, min_p_mw=0, max_p_mw=0.025,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n pp.create_load(net, b1, p_mw=0.025, controllable=True, max_p_mw=0.0025, min_p_mw=0,\n max_q_mvar=2.5, min_q_mvar=-2.5)\n\n # costs\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=3)\n pp.create_poly_cost(net, 0, \"load\", cp1_eur_per_mw=-1)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=2)\n pp.create_poly_cost(net, 1, \"sgen\", cp1_eur_per_mw=1)\n pp.create_poly_cost(net, 1, \"load\", cp1_eur_per_mw=-1)\n\n net[\"sgen\"].in_service.iloc[1] = False\n net[\"load\"].in_service.iloc[1] = False\n\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n\ndef test_no_controllables(simple_opf_test_net):\n # was ist das problwem an diesem fall und wie fange ich es ab?\n net = simple_opf_test_net\n net.gen.controllable = False\n pp.create_poly_cost(net, 0, \"gen\", cp1_eur_per_mw=-2)\n pp.create_poly_cost(net, 0, \"load\", cp1_eur_per_mw=1)\n try:\n pp.runopp(net)\n except pp.OPFNotConverged:\n # opf will fail if not bus limits are set and vm_pu is the default value of 1.0 (it is enforced)\n assert True\n net.gen.loc[:, \"vm_pu\"] = 1.062 # vm_pu setpoint is mandatory if controllable=False\n net.gen.loc[:, \"p_mw\"] = 0.149\n pp.runopp(net)\n assert np.allclose(net.res_gen.at[0, \"vm_pu\"], 1.062)\n assert np.allclose(net.res_gen.at[0, \"p_mw\"], 0.149)\n\n\ndef test_opf_no_controllables_vs_pf():\n \"\"\" Comparing the calculation results of PF and OPF in a simple network with non-controllable\n elements \"\"\"\n\n # boundaries\n vm_max = 1.3\n vm_min = 0.9\n max_line_loading_percent = 100\n\n # create network\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n b2 
= pp.create_bus(net, vn_kv=0.4, max_vm_pu=vm_max, min_vm_pu=vm_min)\n\n pp.create_line(net, b1, b2, length_km=5, std_type=\"NAYY 4x50 SE\",\n max_loading_percent=max_line_loading_percent)\n\n # test elements static\n pp.create_ext_grid(net, b2)\n pp.create_load(net, b1, p_mw=.0075, controllable=False)\n pp.create_sgen(net, b1, p_mw=0.025, controllable=False, min_p_mw=0.01, max_p_mw=0.025,\n max_q_mvar=0.025, min_q_mvar=-0.025)\n\n # testing cost assignment (for non-controllable elements - see Gitlab Issue #27)\n pp.create_poly_cost(net, 0, \"ext_grid\", cp1_eur_per_mw=3)\n pp.create_poly_cost(net, 0, \"load\", cp1_eur_per_mw=-3)\n pp.create_poly_cost(net, 0, \"sgen\", cp1_eur_per_mw=2)\n\n # do calculations\n pp.runopp(net)\n assert net[\"OPF_converged\"]\n\n res_opf_line_loading = net.res_line.loading_percent\n res_opf_bus_voltages = net.res_bus.vm_pu\n\n pp.runpp(net)\n assert net[\"converged\"]\n\n res_pf_line_loading = net.res_line.loading_percent\n res_pf_bus_voltages = net.res_bus.vm_pu\n\n # assert calculation behaviour\n assert np.isclose(res_opf_line_loading, res_pf_line_loading).all()\n assert np.isclose(res_opf_bus_voltages, res_pf_bus_voltages).all()\n\n\ndef test_line_temperature():\n net = simplest_grid()\n r_init = net.line.r_ohm_per_km.copy()\n\n # run OPF\n pp.runopp(net, verbose=False)\n va_init = net.res_bus.va_degree\n assert \"r_ohm_per_km\" not in net.res_line.columns\n\n # check results of r adjustment, check that user_pf_options works, alpha\n net.line[\"temperature_degree_celsius\"] = 80\n alpha = 4.03e-3\n net.line['alpha'] = alpha\n pp.runopp(net, verbose=False, consider_line_temperature=True)\n r_temp = r_init * (1 + alpha * (80 - 20))\n assert np.allclose(net.res_line.r_ohm_per_km, r_temp, rtol=0, atol=1e-16)\n assert not np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-2)\n\n pp.runopp(net, verbose=False, consider_line_temperature=False)\n assert np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-16)\n assert \"r_ohm_per_km\" not in net.res_line.columns\n\n\[email protected]\ndef four_bus_net():\n net = simple_four_bus_system()\n net.sgen.drop(index=1, inplace=True)\n net.load.drop(index=1, inplace=True)\n return net\n\n\ndef test_three_slacks_vm_setpoint(four_bus_net):\n # tests a net with three slacks in one area. Two of them will be converted to gens, since only one is allowed per\n # area. 
The others should have vmin / vmax set as their vm_pu setpoint\n net = four_bus_net\n # create two additional slacks with different voltage setpoints\n pp.create_ext_grid(net, 1, vm_pu=1.01, max_p_mw=1., min_p_mw=-1., min_q_mvar=-1, max_q_mvar=1.)\n pp.create_ext_grid(net, 3, vm_pu=1.02, max_p_mw=1., min_p_mw=-1., min_q_mvar=-1, max_q_mvar=1.)\n pp.runpp(net)\n # assert if voltage limits are correct in result in pf an opf\n assert np.allclose(net.res_bus.loc[[0, 1, 3], \"vm_pu\"], [1., 1.01, 1.02])\n pp.runopp(net, calculate_voltage_angles=False)\n assert np.allclose(net.res_bus.loc[[0, 1, 3], \"vm_pu\"], [1., 1.01, 1.02])\n\n\ndef test_only_gen_slack_vm_setpoint(four_bus_net):\n # tests a net with only gens of which one of them is a a slack\n # The vmin / vmax vm_pu setpoint should be correct\n net = four_bus_net\n net.ext_grid.drop(index=net.ext_grid.index, inplace=True)\n net.bus.loc[:, \"min_vm_pu\"] = 0.9\n net.bus.loc[:, \"max_vm_pu\"] = 1.1\n # create two additional slacks with different voltage setpoints\n pp.create_gen(net, 0, p_mw=0., vm_pu=1., max_p_mw=1., min_p_mw=-1., min_q_mvar=-1, max_q_mvar=1., slack=True)\n g1 = pp.create_gen(net, 1, p_mw=0.02, vm_pu=1.01, max_p_mw=1., min_p_mw=-1., min_q_mvar=-1, max_q_mvar=1.,\n controllable=False) # controllable == False -> vm_pu enforced\n g3 = pp.create_gen(net, 3, p_mw=0.01, vm_pu=1.02, max_p_mw=1., min_p_mw=-1.,\n min_q_mvar=-1, max_q_mvar=1.) # controllable == True -> vm_pu between bus voltages\n pp.runpp(net)\n # assert if voltage limits are correct in result in pf an opf\n assert np.allclose(net.res_bus.loc[[0, 1, 3], \"vm_pu\"], [1., 1.01, 1.02])\n pp.runopp(net, calculate_voltage_angles=False)\n\n # controllable == True is more important than slack == True -> vm_pu is between bus limits\n assert not np.allclose(net.res_bus.at[0, \"vm_pu\"], 1.)\n # controllable == True is less important than slack == True -> see\n # https://github.com/e2nIEE/pandapower/issues/511#issuecomment-536593128\n\n # assert value of controllable == False gen\n assert np.allclose(net.res_bus.at[1, \"vm_pu\"], 1.01)\n assert np.allclose(net.res_bus.at[1, \"p_mw\"], -0.02)\n # assert limit of controllable == True gen\n assert 0.9 < net.res_bus.at[3, \"vm_pu\"] < 1.1\n assert not net.res_bus.at[3, \"vm_pu\"] == 1.02\n\n\ndef test_gen_p_vm_fixed(four_bus_net):\n # tests if gen max_vm_pu and min_vm_pu are correctly enforced\n net = four_bus_net\n min_vm_pu, max_vm_pu = .95, 1.05\n min_p_mw, max_p_mw = 0., 1.\n p_mw, vm_pu = 0.02, 1.01\n bus = 1\n\n # controllable == False -> limits are ignored and p_mw / vm_pu values are enforced\n pp.create_gen(net, bus, p_mw=p_mw, vm_pu=vm_pu, controllable=False,\n min_vm_pu=min_vm_pu, max_vm_pu=max_vm_pu, min_p_mw=min_p_mw, max_p_mw=max_p_mw)\n pp.runopp(net, calculate_voltage_angles=False)\n assert np.allclose(net.res_bus.at[bus, \"vm_pu\"], vm_pu)\n assert np.allclose(net.res_bus.at[bus, \"p_mw\"], -p_mw)\n\n\ndef test_gen_p_vm_limits(four_bus_net):\n # tests if gen max_vm_pu and min_vm_pu are correctly enforced\n net = four_bus_net\n net.bus.loc[:, \"min_vm_pu\"] = 0.9\n net.bus.loc[:, \"max_vm_pu\"] = 1.1\n min_vm_pu, max_vm_pu = .99, 1.005\n min_p_mw, max_p_mw = 0., 1.\n bus = 1\n # controllable == False -> limits are ignored and p_mw / vm_pu values are enforced\n pp.create_gen(net, bus, p_mw=0.02, vm_pu=1.01, controllable=True,\n min_vm_pu=min_vm_pu, max_vm_pu=max_vm_pu, min_p_mw=min_p_mw, max_p_mw=max_p_mw)\n pp.runopp(net, calculate_voltage_angles=False)\n assert not np.allclose(net.res_bus.at[bus, \"vm_pu\"], 
1.01)\n assert not np.allclose(net.res_bus.at[bus, \"p_mw\"], 0.02)\n assert min_vm_pu < net.res_bus.at[bus, \"vm_pu\"] < max_vm_pu\n assert min_p_mw <= -net.res_bus.at[bus, \"p_mw\"] < max_p_mw\n\n\ndef test_gen_violated_p_vm_limits(four_bus_net):\n # tests if gen max_vm_pu and min_vm_pu are correctly enforced\n net = four_bus_net\n min_vm_pu, max_vm_pu = .98, 1.007 # gen limits are out of bus limits\n net.bus.loc[:, \"min_vm_pu\"] = min_vm_pu\n net.bus.loc[:, \"max_vm_pu\"] = max_vm_pu\n\n min_p_mw, max_p_mw = 0., 1.\n bus = 1\n # controllable == False -> limits are ignored and p_mw / vm_pu values are enforced\n g = pp.create_gen(net, bus, p_mw=0.02, vm_pu=1.01, controllable=True,\n min_vm_pu=.9, max_vm_pu=1.1, min_p_mw=min_p_mw, max_p_mw=max_p_mw)\n pp.runopp(net, calculate_voltage_angles=False)\n assert not np.allclose(net.res_bus.at[bus, \"vm_pu\"], 1.01)\n assert not np.allclose(net.res_bus.at[bus, \"p_mw\"], 0.02)\n assert min_vm_pu < net.res_bus.at[bus, \"vm_pu\"] < max_vm_pu\n assert min_p_mw <= -net.res_bus.at[bus, \"p_mw\"] < max_p_mw\n net.gen.at[g, \"vm_pu\"] = 0.9 # lower bus vm_pu limit violation\n pp.runopp(net, calculate_voltage_angles=False)\n assert min_vm_pu < net.res_bus.at[bus, \"vm_pu\"] < max_vm_pu\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-xs\"])\n"
] |
[
[
"numpy.array",
"numpy.allclose",
"numpy.isclose"
]
] |
mwilliammyers/dcnn
|
[
"ab5e30fc2fdb80781459608a6c452334414756d0"
] |
[
"dcnn.py"
] |
[
"import time\nimport timeit\nimport numpy as np\nimport dataloader\nimport models\nimport torch\nimport tqdm\nfrom tensorboardX import SummaryWriter\n\n\ndef get_arguments():\n import argparse\n\n model_choices = ['dcnn', 'mlp']\n non_linearity_choices = ['tanh', 'relu', 'leaky-relu']\n optim_choices = ['adagrad', 'adadelta', 'adam']\n dataset_choices = ['twitter', 'twitter-large', 'yelp']\n\n parser = argparse.ArgumentParser('Dynamic CNN in PyTorch', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '-c',\n '--current-run',\n dest='current_run',\n metavar='CURRENT_RUN',\n type=int,\n default=1,\n help='The current run') # yapf: disable\n parser.add_argument(\n '-n',\n '--num-epochs',\n dest='num_epochs',\n metavar='EPOCHS',\n type=int,\n default=8,\n help='Number of epochs to train for') # yapf: disable\n parser.add_argument(\n '-b',\n '--batch-size',\n dest='batch_size',\n metavar='BATCH-SIZE',\n type=int,\n default=16,\n help='Size of a mini batch') # yapf: disable\n parser.add_argument(\n '-k',\n '--kernel-sizes',\n dest='kernel_sizes',\n metavar='KERNEL-SIZE',\n type=int,\n nargs='*',\n default=[7, 5],\n help='Size of the kernels per layer; formatted as a space delimited list') # yapf: disable\n parser.add_argument(\n '-f',\n '--num-filters',\n dest='num_filters',\n metavar='NUM-FILTER',\n type=int,\n nargs='*',\n default=[6, 14],\n help='The number of filters at each layer; formatted as a space delimited list') # yapf: disable\n parser.add_argument(\n '-l',\n '--log',\n dest='log',\n metavar='LOG-FILE',\n type=str,\n default='logs/stats',\n help='Path to output log file') # yapf: disable\n parser.add_argument(\n '-m',\n '--model',\n dest='model',\n metavar='MODEL-TYPE',\n type=str,\n default='dcnn',\n choices=model_choices,\n help=f'Model to use. One of {model_choices}') # yapf: disable\n parser.add_argument(\n '-r',\n '--lr',\n dest='learning_rate',\n metavar='LEARNING-RATE',\n type=float,\n default=0.03,\n help='Learning rate') # yapf: disable\n parser.add_argument(\n '-e',\n '--embed-dim',\n dest='embedding_dim',\n metavar='EMBED-DIMENSION',\n type=int,\n default=80,\n help='Dimension of the word embeddings') # yapf: disable\n parser.add_argument(\n '-p',\n '--eval-period',\n dest='eval_period',\n metavar='NUM-BATCHES',\n type=int,\n default=200,\n help='Number of training batches between validation evals') # yapf: disable\n parser.add_argument(\n '-a',\n '--non-linearity',\n dest='non_linearity',\n metavar='FUNCTION',\n default=non_linearity_choices[0],\n choices=non_linearity_choices,\n help=f'Non linearity function. One of {non_linearity_choices}') # yapf: disable\n parser.add_argument(\n '-o',\n '--optim',\n dest='optim',\n metavar='OPTIMIZER-ALGORITHM',\n default=optim_choices[0],\n choices=optim_choices,\n help=f'Optimization algorithm. One of {optim_choices}') # yapf: disable\n parser.add_argument(\n '-d',\n '--dataset',\n dest='dataset',\n metavar='DATASET',\n default=dataset_choices[0],\n choices=dataset_choices,\n help=f'Dataset. 
One of {dataset_choices}') # yapf: disable\n parser.add_argument(\n '-t',\n '--track-mistakes',\n dest='track_mistakes',\n action='store_true',\n default=False,\n help='Show counts of mis-predicted labels') # yapf: disable\n return parser.parse_args()\n\n\ndef get_data_iters(args):\n start_time = timeit.default_timer()\n device = None if torch.cuda.is_available() else -1 # None == GPU, -1 == CPU\n if args.dataset == 'twitter':\n load_data = dataloader.twitter(embedding_dim=args.embedding_dim, batch_size=args.batch_size, device=device)\n elif args.dataset == 'twitter-large':\n load_data = dataloader.twitter(\n embedding_dim=args.embedding_dim,\n path='data/twitter.training.csv',\n fields=('text', 'label'),\n batch_size=args.batch_size,\n device=device)\n elif args.dataset == 'yelp':\n load_data = dataloader.yelp(embedding_dim=args.embedding_dim, batch_size=args.batch_size, device=device)\n\n train_iter, val_iter, test_iter = load_data()\n val_iter.sort_key = test_iter.sort_key = lambda example: len(example.text)\n print('loaded data in:', timeit.default_timer() - start_time)\n return train_iter, val_iter, test_iter\n\n\ndef get_model(args, num_embeddings, num_classes):\n non_linearities = {'tanh': torch.tanh, 'relu': torch.nn.ReLU(), 'leaky-relu': torch.nn.LeakyReLU()}\n if args.model == 'dcnn':\n model = models.DCNN(\n num_embeddings,\n args.embedding_dim,\n num_classes,\n kernel_sizes=args.kernel_sizes,\n num_filters=args.num_filters,\n non_linearity=non_linearities[args.non_linearity])\n elif args.model == 'mlp':\n max_length = max(len(x.text) for x in train_iter.data())\n model = models.MLP(num_embeddings, args.embedding_dim, max_length, num_classes)\n if torch.cuda.is_available():\n model = model.cuda()\n return model\n\n\ndef get_optim(args, parameters):\n if args.optim == 'adagrad':\n return torch.optim.Adagrad(parameters, lr=args.learning_rate)\n if args.optim == 'adadelta':\n return torch.optim.Adadelta(parameters)\n elif args.optim == 'adam':\n return torch.optim.Adam(parameters, lr=args.learning_rate)\n\n\ndef calc_accuracy(outputs, targets):\n correct = (outputs.data.max(dim=1)[1] == targets.data)\n return torch.sum(correct) / targets.size()[0]\n\n\ndef compute_confusion(model, val_iter):\n print('Evaluating mistakes...')\n mistakes = {k: 0 for k in val_iter.dataset.fields['label'].vocab.itos}\n confusion = np.zeros((3, 3))\n for batch in val_iter:\n outputs = model(batch.text)\n loss = criterion(outputs, batch.label)\n targets = batch.label\n predictions = outputs.data.max(dim=1)[1]\n correct = (predictions == targets.data)\n correct = correct.cpu().numpy().astype('bool')\n label = targets.data.cpu().numpy().tolist()\n predictions = predictions.cpu().numpy().tolist()\n for lab, pred in zip(label, predictions):\n confusion[lab, pred] += 1\n print('Confusion matrix:')\n print(confusion)\n return confusion\n\n\nif __name__ == '__main__':\n args = get_arguments()\n\n train_iter, val_iter, test_iter = get_data_iters(args)\n\n model = get_model(\n args,\n num_embeddings=len(train_iter.dataset.fields['text'].vocab),\n num_classes=len(train_iter.dataset.fields['label'].vocab))\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = get_optim(args, model.params())\n\n log_file = f\"{args.log}_run-{args.current_run}_{time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime())}\"\n writer = SummaryWriter(log_file)\n\n writer.add_text('hyperparameters', str(args.__dict__))\n\n # `stats` has form [train_loss, train_acc, validation_loss, validation_acc]\n stats = np.zeros(4, dtype='float64')\n 
desc = f'run {args.current_run}'\n with tqdm.tqdm(train_iter, total=len(train_iter) * args.num_epochs, position=args.current_run, desc=desc) as pbar:\n for i, batch in enumerate(train_iter):\n optimizer.zero_grad()\n\n outputs = model(batch.text)\n loss = criterion(outputs, batch.label)\n loss.backward()\n optimizer.step()\n\n stats[0] += loss.data[0]\n stats[1] += calc_accuracy(outputs, batch.label)\n\n pbar.update()\n if (i % args.eval_period) == (args.eval_period - 1):\n for batch in val_iter:\n outputs = model(batch.text)\n loss = criterion(outputs, batch.label)\n stats[2] += loss.data[0]\n stats[3] += calc_accuracy(outputs, batch.label)\n stats[:2] /= args.eval_period\n stats[2:] /= len(val_iter)\n\n writer.add_scalar('stats/train_loss', stats[0], i)\n writer.add_scalar('stats/train_acc', stats[1], i)\n writer.add_scalar('stats/validation_loss', stats[2], i)\n writer.add_scalar('stats/validation_acc', stats[3], i)\n\n for name, param in model.named_parameters():\n writer.add_histogram(name, param, i)\n\n pbar.set_postfix(train_loss=stats[0], train_acc=stats[1], val_loss=stats[2], val_acc=stats[3])\n stats[:] = 0\n if train_iter.epoch >= args.num_epochs:\n break\n\n if args.track_mistakes:\n confusion = compute_confusion(model, val_iter)\n\n writer.scalar_dict['hyperparameters'] = args.__dict__\n title = f\"{args.log.split('/')[-1].split('_')[0]}_{args.model}_{args.dataset}_run-{args.current_run}\"\n writer.scalar_dict['title'] = title\n writer.export_scalars_to_json(f'{log_file}.json')\n writer.close()\n"
] |
[
[
"torch.optim.Adam",
"torch.nn.CrossEntropyLoss",
"torch.optim.Adagrad",
"torch.sum",
"torch.optim.Adadelta",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.ReLU",
"numpy.zeros"
]
] |
robotic-vision-lab/Attention-With-Varying-Receptive-Fields-Network
|
[
"e151216ca029dc72ab93e03d6bcacd69161d1c25"
] |
[
"SRcode/InceptionModules.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport tensorflow.keras as keras\nfrom tensorflow.keras import datasets, layers, models, regularizers\nimport pdb\nclass inception_module(layers.Layer):\n \"\"\"\n Inception Module\n This module is made for image super-resolution where max pooling layers\n are known to be unnecessary. In addition instead of different sized\n kernels, this module instead uses different dilation rates.\n This module as with all other modules in this file may be used the Keras \n sequential API.\n \"\"\"\n def __init__(self, filters,reduction = 1, name = None):\n super(inception_module, self).__init__()\n self.mergea = layers.Conv2D(filters //2, kernel_size = (1,1), \n name = 'merge2d-1', activation = 'linear',\n padding = 'same',\n strides = (1,1))\n self.mergeb = layers.Conv2D(filters //2, kernel_size = (3,3),\n name = 'merge2d-2', activation = 'linear', \n padding = 'same', \n strides = 1)\n self.mergec = layers.Conv2D(filters //2, kernel_size = (5,5),\n name = 'merge2d-3', activation = 'linear', \n padding = 'same', \n strides = 1)\n self.pool = layers.MaxPool2D(pool_size = (3,3), strides = 1,\n padding = 'same')\n def call(self, x):\n a = self.mergea(x)\n\n b = self.mergeb(x)\n\n c = self.mergec(x)\n \n d = self.pool(x)\n return tf.concat([a,b,c,d], axis = -1)\nclass incept_dilated(layers.Layer):\n \"\"\"\n Inception Module with different dilation rates\n This module is made for image super-resolution where max pooling layers\n are known to be unnecessary. In addition instead of different sized\n kernels, this module instead uses different dilation rates.\n This module as with all other modules in this file may be used the Keras \n sequential API.\n \"\"\"\n def __init__(self, filters,reduction = 2, name = None):\n super(incept_dilated, self).__init__()\n self.mergea = layers.Conv2D(filters // reduction, kernel_size = (1,1), \n name = 'merge2d-1', activation = 'relu',\n strides = (1,1), padding = 'same')\n self.mergeb = layers.Conv2D(filters //reduction, kernel_size\n=(3,3),\n name = 'merge2d-2', activation = 'relu', \n padding = 'same', dilation_rate = 1,\n strides = 1)\n self.mergec = layers.Conv2D(filters //reduction, kernel_size =\n(5,5),\n name = 'merge2d-3', activation = 'relu', \n padding = 'same', dilation_rate = 1,\n strides = 1)\n #self.pool = layers.MaxPool2D(pool_size = (3,3), strides = 1,padding = 'same') \n def call(self, x):\n a= self.mergea(x)\n\n b = self.mergeb(x)\n\n c = self.mergec(x)\n \n #d = self.pool(x)\n \n \n\n return tf.concat([a,b,c], axis = -1)\n \n\n\n"
] |
[
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.concat",
"tensorflow.keras.layers.MaxPool2D"
]
] |
Victorwz/awesome_NLP_projects
|
[
"d16ca80b1b5bcc0536380599a01af173507961ad",
"d16ca80b1b5bcc0536380599a01af173507961ad"
] |
[
"attention/main.py",
"ngram/code_for_figures_plot/plt_frac.py"
] |
[
"import argparse\nimport control\nimport data\nimport datetime\nimport mylogger\nimport math\nimport model\nimport os\nimport random\nimport sys\nimport time\nimport torch\nimport torch.nn as nn\n\n\ndef make_model_path(args):\n if not os.path.exists('scratch'):\n os.makedirs('scratch')\n model_path = 'scratch/'\n model_path += '%s_' % os.path.basename(os.path.normpath(args.data))\n model_path += 'BS%d_' % args.batch_size\n model_path += 'BM:%s_' % args.batch_method\n model_path += 'bptt%d_' % args.bptt\n model_path += 'dim%d_' % args.dim\n model_path += 'nlayers%d_' % args.nlayers\n model_path += 'dropout%.2f_' % args.dropout\n if args.cond:\n model_path += 'cond_'\n if args.bidir:\n model_path += 'bidir_'\n if args.bridge:\n model_path += 'bridge_'\n if args.attn:\n model_path += 'attn_'\n if args.sort:\n model_path += 'sort_'\n if args.shuffle:\n model_path += 'shuffle_'\n model_path += 'lr%.2f_' % args.lr\n model_path += 'seed%d_' % args.seed\n model_path += 'epochs%d' % args.epochs\n return model_path\n\n\ndef main(args):\n main_start_time = time.time()\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n device = torch.device('cuda' if args.cuda else 'cpu')\n\n model_path = args.model if args.model else make_model_path(args)\n logger = mylogger.Logger(model_path + '.log', args.train)\n logger.log(' '.join(sys.argv) + '\\n')\n\n if args.batch_size_valid > 0:\n batch_size_valid = args.batch_size_valid\n else:\n batch_size_valid = 1 if args.batch_method == 'continuous' else 60\n\n dat = data.Data(args.data, args.batch_size, args.batch_method, device,\n sort=args.sort, logger=logger, is_conditional=args.cond,\n batch_size_valid=batch_size_valid)\n\n s2s = model.Seq2Seq(len(dat.i2w), args.dim, args.nlayers,\n args.dropout, is_conditional=args.cond,\n bidirectional_encoder=args.bidir,\n use_bridge=args.bridge, use_attention=args.attn,\n logger=logger).to(device)\n\n ctrl = control.Control(s2s, args.lr, args.bptt, args.interval,\n model_path=model_path, logger=logger)\n\n if args.train:\n ctrl.train(dat, args.epochs, args.shuffle)\n logger.log(time.strftime(\"%H:%M:%S\", time.gmtime(time.time()\n - main_start_time)))\n else:\n ctrl.load_s2s()\n train_loss, train_sqxent = ctrl.evaluate(dat.train)\n valid_loss, valid_sqxent = ctrl.evaluate(dat.valid)\n print('train ppl: %.2f train sqxent: %.2f' %\n (math.exp(train_loss), train_sqxent))\n print('valid ppl: %.2f valid sqxent: %.2f' %\n (math.exp(valid_loss), valid_sqxent))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model', type=str, default='',\n help='model path [%(default)s]')\n parser.add_argument('--data', type=str, default='./data',\n help='data directory [%(default)s]')\n parser.add_argument('--train', action='store_true',\n help='train?')\n parser.add_argument('--batch_size', type=int, default=20, metavar='BS',\n help='batch size [%(default)d]')\n parser.add_argument('--batch_size_valid', type=int, default=0,\n metavar='BSV',\n help='validation batch size (0 if auto) [%(default)d]')\n parser.add_argument('--batch_size_test', type=int, default=0,\n metavar='BST',\n help='test batch size (0 if auto) [%(default)d]')\n parser.add_argument('--batch_method', type=str, default='continuous',\n metavar='BM',\n help='batch method (continuous, translation) '\n '[%(default)s]')\n parser.add_argument('--bptt', type=int, default=35,\n help='sequence length [%(default)d]')\n parser.add_argument('--dim', type=int, default=100,\n help='dimension of input/hidden states [%(default)d]')\n 
parser.add_argument('--nlayers', type=int, default=2,\n help='number of LSTM layers [%(default)d]')\n parser.add_argument('--dropout', type=float, default=0.2,\n help='dropout applied to layers (0 = no dropout) '\n '[%(default)f]')\n parser.add_argument('--cond', action='store_true',\n help='conditional language model?')\n parser.add_argument('--bidir', action='store_true',\n help='bidirectional encoder?')\n parser.add_argument('--bridge', action='store_true',\n help='use bridge?')\n parser.add_argument('--attn', action='store_true',\n help='use attention?')\n parser.add_argument('--sort', action='store_true',\n help='sort by target lengths before batching? '\n '(only for translation data)')\n parser.add_argument('--shuffle', action='store_true',\n help='shuffle bundles? (only for translation data)')\n parser.add_argument('--lr', type=float, default=20,\n help='initial learning rate [%(default)f]')\n parser.add_argument('--seed', type=int, default=1111,\n help='random seed [%(default)d]')\n parser.add_argument('--epochs', type=int, default=10,\n help='upper epoch limit [%(default)d]')\n parser.add_argument('--interval', type=int, default=20,\n help='logging interval [%(default)d]')\n parser.add_argument('--cuda', action='store_true',\n help='use CUDA?')\n\n args = parser.parse_args()\n main(args)\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrac = np.zeros(10)\nfrac[0] = 0.1\nfor i in range(1, len(frac)):\n\tfrac[i] = frac[i-1] + 0.1 \n\ntrain_perplexity = [101.60, 103.35, 102.75, 103.03, 103.63, 102.47, 101.87, 101.57, 101.38, 101.43]\n\nval_perplexity = [601.93, 436.71, 362.35, 320.87, 295.11, 275.76, 262.86, 250.52, 240.85, 232.10]\n\nplt.plot(frac, train_perplexity, color = \"r\", linestyle = \"-\", marker = \"^\", linewidth = 1, label = \"train\")\n\nplt.plot(frac, val_perplexity, color = \"b\", linestyle = \"-\", marker = \"s\", linewidth = 1, label = \"validation\")\n\nplt.legend(loc='upper center', bbox_to_anchor=(0.6,0.95))\n\nplt.xlabel(\"train_fraction\")\n\nplt.ylabel(\"perplexity\")\n\nplt.title(\"perplexity versus train_fraction\")\n\nplt.savefig(\"frac.pdf\", dpi = 200)"
] |
[
[
"torch.device",
"torch.manual_seed"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
PokeLegoCuber/BigRedHackVProject
|
[
"14e68152cc4e1441ddacb3a909d81b9c688e47ab"
] |
[
"PythonDataProcessing/main.py"
] |
[
"\"\"\"\r\nReading inputs from a file and outputs corresponding result in a file\r\n\r\nTakes in state abbreviation and county name and outputs house fire risk (int)\r\nBased on data from Red Cross Smoke Alarm Map\r\n\r\nAuthor: Haoxuan Chen\r\nDate: 2019/9/22\r\n\"\"\"\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv('d.csv')\r\nwith open (\"input.txt\", \"r\") as myfile:\r\n i = myfile.readlines()\r\n s = i[0]\r\nstatein = s[:s.index(\" \")]\r\ncounty = s[s.index(\" \")+1:].replace(\" \",\"\")\r\ntry:\r\n out = open(\"result.txt\", \"w\")\r\n out.write(str(int(df.loc[(df['stateAbbrev'] == statein) & (df['county'] == county),'risk'].iloc[0])))\r\n out.close()\r\nexcept:\r\n out = open(\"result.txt\", \"w\")\r\n out.write('invalid input')\r\n out.close()\r\n"
] |
[
[
"pandas.read_csv"
]
] |
efurlanm/tama21
|
[
"5849f2b730a99ca7cca2d1cb27442d321f12419b"
] |
[
"2021-11-08/ng2.py"
] |
[
"import numpy as np, math\nfrom time import time\nfrom mpi4py import MPI\nfrom numba import cuda, njit, prange, config\n\n# parameters\nn = 4800 # n x n grid\nenergy = 1.0 # energy to be injected per iteration\nniters = 500 # number of iterations\n# initialize three heat sources\nnsources = 3 # number of sources of energy\nsources = np.zeros((nsources, 2), np.int16)\nsources[:,:] = [ [n//2, n//2], [n//3, n//3], [n*4//5, n*8//9] ]\n# initialize the data arrays\nanew = np.zeros((n + 2, n + 2), np.float64)\naold = np.zeros((n + 2, n + 2), np.float64)\n\n# configure blocks & grids\n## set the number of threads in a block\nthreads_per_block = (16, 16) # based on trial and error\n## calculate the number of thread blocks in the grid\nblocks_per_grid_x = math.ceil(aold.shape[0] / threads_per_block[0])\nblocks_per_grid_y = math.ceil(aold.shape[1] / threads_per_block[1])\nblocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)\n\n# computationally intensive core\[email protected]\ndef kernel(A, B):\n n = A.shape[0] - 1\n i, j = cuda.grid(2)\n if (i > 0 and j > 0) and (i < n and j < n) :\n A[i,j]=B[i,j]*.5+(B[i-1,j]+B[i+1,j]+B[i,j-1]+B[i,j+1])*.125\n\n# start of main routine\n\n#---mpi4py---\ncomm = MPI.COMM_WORLD # MPI default communicator\nsize = comm.Get_size() # MPI size\nrank = comm.Get_rank() # MPI rank\nname = MPI.Get_processor_name() # core hostname (eg sdumont3170)\n\n#Only 2 processes per node are selected via Slurm. Within a node, color \n#rank 0 corresponds to the first process of this node, and color rank 1 \n#corresponds to the second process of this node, and the other nodes are \n#similar. Example:\n# node rank color rank\n#----------- ---- ----------\n#sdumont3170 0 0\n#sdumont3170 1 1\n#sdumont3171 2 0\n#sdumont3171 3 1\n#sdumont3172 4 0\n#sdumont3172 5 1\n#sdumont3173 6 0\n#sdumont3173 7 1\nfor i, c in enumerate(name) : # find first digit in hostname\n if c.isdigit() :\n break\nmcol = int(name[i:]) # extract number from hostname\nscomm = comm.Split(color = mcol) # new communicator for the node\ncrank = scomm.Get_rank() # get the node color rank\n\n#---numba.cuda---\n#In this implementation, Slurm is configured to run only 2 processes on \n#each node. For each of these processes (cores), a single GPU is \n#associated. 
Thus, within a node, color rank 0 is associated with GPU 0, \n#and color rank 1 is associated with GPU 1.\ncuda.select_device(crank) # 'color rank' 0 = 'gpu id' 0, etc.\ncid = cuda.current_context().device.id\n\n# time measurement for rank 0\nif not rank :\n tt = -time() # rank 0 time\n tk = 0 # accumulate kernel time\n tc = 0 # accumulate communication time\n\n# determine my coordinates (x,y)\npdims = MPI.Compute_dims(size, 2)\npx = pdims[0]\npy = pdims[1]\nrx = rank % px\nry = rank // px\n\n# determine my four neighbors\nnorth = (ry - 1) * px + rx\nif (ry - 1) < 0 :\n north = MPI.PROC_NULL\nsouth = (ry + 1) * px + rx\nif (ry + 1) >= py :\n south = MPI.PROC_NULL\nwest = ry * px + rx - 1\nif (rx - 1) < 0 :\n west = MPI.PROC_NULL\neast = ry * px + rx + 1\nif (rx + 1) >= px :\n east = MPI.PROC_NULL\n\n# decompose the domain\nbx = n // px # block size in x\nby = n // py # block size in y\noffx = rx * bx + 1 # offset in x\noffy = ry * by + 1 # offset in y\n\n# sources in my area, local to my rank\nlocnsources = 0\nlocsources = np.empty((nsources, 2), np.int16)\n\n# determine which sources are in my patch\nfor i in range(nsources) :\n locx = sources[i, 0] - offx\n locy = sources[i, 1] - offy\n if(locx >= 0 and locx <= bx and locy >= 0 and locy <= by) :\n locsources[locnsources, 0] = locx\n locsources[locnsources, 1] = locy\n locnsources += 1\n\n# working arrays with 1-wide halo zones\nanew = np.zeros((bx+2, by+2), np.float64)\naold = np.zeros((bx+2, by+2), np.float64)\n\n# system total heat\nrheat = np.zeros(1, np.float64)\nbheat = np.zeros(1, np.float64)\n\n# copy the first arrays to the device\nif not rank : tc -= time()\nanew_global_mem = cuda.to_device(anew)\naold_global_mem = cuda.to_device(aold)\nif not rank : tc += time()\n \n# main loop\nfor _ in range(0, niters, 2) :\n\n # exchange data with neighbors\n if north != MPI.PROC_NULL :\n r1=comm.irecv(source=north, tag=1)\n s1=comm.isend(aold[1, 1:bx+1], dest=north, tag=1)\n if south != MPI.PROC_NULL :\n r2=comm.irecv(source=south, tag=1)\n s2=comm.isend(aold[bx, 1:bx+1], dest=south, tag=1)\n if east != MPI.PROC_NULL :\n r3 = comm.irecv(source=east, tag=1)\n s3 = comm.isend(aold[1:bx+1, bx], dest=east, tag=1)\n if west != MPI.PROC_NULL :\n r4 = comm.irecv(source=west, tag=1)\n s4 = comm.isend(aold[1:bx+1, 1], dest=west, tag=1)\n # wait for the end of communication\n if north != MPI.PROC_NULL :\n s1.wait()\n aold[0, 1:bx+1] = r1.wait()\n if south != MPI.PROC_NULL :\n s2.wait()\n aold[bx+1, 1:bx+1] = r2.wait()\n if east != MPI.PROC_NULL :\n s3.wait()\n aold[1:bx+1, bx+1] = r3.wait()\n if west != MPI.PROC_NULL :\n s4.wait\n aold[1:bx+1, 0] = r4.wait()\n\n # copy the received array to the device\n if not rank : tc -= time()\n aold_global_mem = cuda.to_device(aold)\n if not rank : tc += time()\n \n # update grid\n if not rank : tk -= time()\n kernel[blocks_per_grid, threads_per_block](\n anew_global_mem, aold_global_mem)\n if not rank : tk += time()\n \n # copy the result back to the host\n if not rank : tc -= time()\n anew = anew_global_mem.copy_to_host()\n if not rank : tc += time()\n \n # refresh heat sources\n for i in range(locnsources) :\n anew[locsources[i, 0]-1, locsources[i, 1]-1] += energy\n\n # exchange data with neighbors\n if north != MPI.PROC_NULL :\n r1=comm.irecv(source=north, tag=1)\n s1=comm.isend(anew[1, 1:bx+1], dest=north, tag=1)\n if south != MPI.PROC_NULL :\n r2=comm.irecv(source=south, tag=1)\n s2=comm.isend(anew[bx, 1:bx+1], dest=south, tag=1)\n if east != MPI.PROC_NULL :\n r3 = comm.irecv(source=east, tag=1)\n s3 = 
comm.isend(anew[1:bx+1, bx], dest=east, tag=1)\n if west != MPI.PROC_NULL :\n r4 = comm.irecv(source=west, tag=1)\n s4 = comm.isend(anew[1:bx+1, 1], dest=west, tag=1)\n # wait for the end of communication\n if north != MPI.PROC_NULL :\n s1.wait()\n anew[0, 1:bx+1] = r1.wait()\n if south != MPI.PROC_NULL :\n s2.wait()\n anew[bx+1, 1:bx+1] = r2.wait()\n if east != MPI.PROC_NULL :\n s3.wait()\n anew[1:bx+1, bx+1] = r3.wait()\n if west != MPI.PROC_NULL :\n s4.wait\n anew[1:bx+1, 0] = r4.wait()\n\n # copy the received array to the device\n if not rank : tc -= time()\n anew_global_mem = cuda.to_device(anew)\n if not rank : tc += time()\n\n # update grid\n if not rank : tk -= time()\n kernel[blocks_per_grid, threads_per_block](\n aold_global_mem, anew_global_mem)\n if not rank : tk += time()\n \n # copy the result back to the host\n if not rank : tc -= time()\n aold = aold_global_mem.copy_to_host()\n if not rank : tc += time()\n \n # refresh heat sources\n for i in range(locnsources) :\n aold[locsources[i, 0]-1, locsources[i, 1]-1] += energy \n\n# end for\n\n# get final heat in the system\nbheat[0] = np.sum(aold[1:-1, 1:-1])\ncomm.Reduce(bheat, rheat)\n\n# show the result\nprint(f\"3. {name:11s} {rank:02d} {crank:02d} {cid:02d}\")\nif not rank :\n tt += time()\n print( \"1. hostname rank crank cid\")\n print( \"2. ----------- ---- ----- ----\")\n print( \"4. ---------------------------\")\n print(f\"5. Heat:{rheat[0]:.4f}\", end=\", \")\n print(f\"TT:{tt:.4f}\", end=\", \")\n print(f\"KT:{tk:.4f}\", end=\", \")\n print(f\"CT:{tc:.4f}\", end=\", \")\n print(f\"MPI:{size}\", end=\", \")\n print(f\"dim:{n}\", end=\", \")\n print(f\"ite:{niters}\")\n"
] |
[
[
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] |
nxbrnt/spotify_data_project
|
[
"37b1c98e3c032f1751ee91b873ecc7b2890595a2"
] |
[
"violin_midcurve.py"
] |
[
"#Imports here for writefile magic only. Not Pythonic.\nimport seaborn as sns\nimport numpy as np\nfrom scipy.interpolate import interp1d\nred = (1,0,0)\nblue = (0,.475,1) #Same relative luminance as primary red (approx)\n\n#can remove a lot of these params for ordering. just wanna be able to flip \n#which goes to left vs right. could stick with color ordering (left is blue)\n\ndef extract_violin_curves(p):\n #Pull vertices from violin KDE estimate plot\n left_points = p.collections[0].get_paths()[0].vertices\n right_points = p.collections[1].get_paths()[0].vertices\n\n #Strip out points where x=0, corresponding to the vertical middle line.\n left_points = [ (a,b) for a,b in left_points if a!=0.0]\n right_points = [ (a,b) for a,b in right_points if a!=0.0]\n #left_points.sort(key=lambda x: x[1])\n #right_points.sort(key=lambda x: x[1])\n\n #Zip into separate x and y arrays\n x_left,y_left = zip(*left_points)\n x_right,y_right = zip(*right_points)\n\n #Interpolated function created from each kde which can be evaluated for \n #arbitrary y values. Return 0 for out of range inputs.\n f_left = interp1d(y_left, x_left, bounds_error=False, fill_value=0.)\n f_right = interp1d(y_right, x_right, bounds_error=False, fill_value=0.)\n\n #Combine list of all y values\n y_all = np.concatenate((y_left,y_right))\n y_all.sort()\n \n return y_all, f_left, f_right\n\ndef violin_midcurve(y, data, hue='ds', y_label=None, \n hue_labels=['My Library','Billboard Top'],\n order=None, hue_order=['nix','top'], bw='silverman',\n colors=[ blue, red ], ax=None, \n ylim=None, title=None):\n\n \"\"\"\n Generates a seaborn violin plot with a \"midcurve\" that indicates \n difference in probability between two distributions.\n \"\"\"\n \n #To do: Implement truncated KDE for distros with known, finite support \n #(e.g. [0,1]). 
Also implement cross-validated bandwidth via sklearn.\n p = sns.violinplot('', y, hue, data, order, hue_order, bw=bw, cut=3, \n split=True, linewidth=0, palette=colors, color=(0,0,0), \n saturation=1, inner=None, showmedians=True, ax=ax,\n gridsize=200)\n \n #Extract violin curves\n y_all, f_left, f_right = extract_violin_curves(p)\n\n #Create midpoint x values of the two KDE plots, and add to violinplot\n x_mid = ( f_left(y_all)+f_right(y_all) )\n p.plot(x_mid,y_all,c='k',lw=3)\n \n #Plot mean lines.\n #Use hue_order to determine which dataset is which\n if hue_order is None:\n hue_order = data[hue].unique()\n data_left = data[y][ data[hue]==hue_order[0] ]\n data_right = data[y][ data[hue]==hue_order[1] ]\n \n mean_left = data_left.mean()\n mean_right = data_right.mean()\n \n #Could set these to xlim values rather than +/- 100.\n p.plot( [-100, 0], [mean_left]*2, 'k-', lw=3)\n p.plot( [100, 0], [mean_right]*2, 'k-', lw=3)\n \n #Draw redundant y axis for mean line comparison\n p.tick_params(labelright=True)\n \n #Plot vertical line\n y_min = min(y_all)\n y_max = max(y_all)\n p.plot( [0]*2, [y_min,y_max], 'k-', lw=3)\n \n #Tight vertical axis\n p.autoscale(enable=True, axis='y', tight=True)\n \n #Custom vertical range\n if ylim is not None:\n p.set_ylim( [ ylim[0], ylim[1] ] )\n \n #Remove legend and add colored split variable indicators to top of frame\n if hue_labels is None:\n hue_labels = hue_order \n p.legend_.remove()\n p.text(.25, -.04, hue_labels[0], horizontalalignment='center', \n verticalalignment='center', transform = p.transAxes, \n color=colors[0], size=16)\n p.text(.75, -.04, hue_labels[1], horizontalalignment='center', \n verticalalignment='center', transform = p.transAxes, \n color=colors[1], size=16)\n \n #Custom Y axis label. Simply capitalizes column name by default.\n if y_label is None:\n y_label = y.title() #Capitalize first letter\n p.set_ylabel(y_label)\n \n #Title, with default.\n if title is None:\n var = y.title() #Capitalize first letter\n title = 'Track ' + var + ' by Dataset'\n p.set_title(title)\n \n return p"
] |
[
[
"numpy.concatenate",
"scipy.interpolate.interp1d"
]
] |
pmclSF/DeepCompress
|
[
"6fc51aa0e9b34fb89f97877ad56da6345f93b929"
] |
[
"src/model_opt.py"
] |
[
"import numpy as np\nimport logging\nfrom scipy.spatial.ckdtree import cKDTree\nfrom utils.pc_metric import validate_opt_metrics, compute_metrics\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_points_threshold(x_hat, thresholds, len_block, max_delta=np.inf):\n pa_list = []\n for i, t in enumerate(thresholds):\n pa = np.argwhere(x_hat > t).astype('float32')\n if len(pa) == 0:\n break\n len_ratio = len(pa) / len_block\n if (1 / max_delta) < len_ratio < max_delta:\n pa_list.append((i, pa))\n return pa_list\n\n\ndef compute_optimal_thresholds(block, x_hat, thresholds, resolution, normals=None, opt_metrics=['d1_mse'],\n max_deltas=[np.inf], fixed_threshold=False):\n validate_opt_metrics(opt_metrics, with_normals=normals is not None)\n assert len(max_deltas) > 0\n best_thresholds = []\n ret_opt_metrics = [f'{opt_metric}_{max_delta}' for max_delta in max_deltas for opt_metric in opt_metrics]\n if fixed_threshold:\n #half_thr = len(thresholds) // 2\n #half_pa = np.argwhere(x_hat > thresholds[half_thr]).astype('float32')\n # here we copy learned PCGC - we send over the location of the threshold which allows = number of points\n # this saves > 300 seconds per mesh we compress, and also means we theoretically don't need to run the\n # sythesis network to obtain samples\n flat_thr = x_hat.flatten()\n flat_thr.sort()\n flat_thr = flat_thr[-len(block)]\n half_pa = np.argwhere(x_hat > flat_thr).astype('float32')\n logger.info(f'Fixed threshold {half_thr}/{len(thresholds)} with {len(half_pa)}/{len(block)} points (ratio {len(half_pa)/len(block):.2f})')\n return ret_opt_metrics, [int(256*flat_thr)] * len(max_deltas) * len(opt_metrics)\n\n pa_list = build_points_threshold(x_hat, thresholds, len(block))\n max_threshold_idx = len(thresholds) - 1\n if len(pa_list) == 0:\n return ret_opt_metrics, [max_threshold_idx] * len(opt_metrics)\n\n t1 = cKDTree(block[:, :3], balanced_tree=False)\n pa_metrics = [compute_metrics(block[:, :3], pa, resolution - 1, p1_n=normals, t1=t1) for _, pa in pa_list]\n\n log_message = f'Processing max_deltas {max_deltas} on block with {len(block)} points'\n for max_delta in max_deltas:\n if max_delta is not None:\n cur_pa_list = build_points_threshold(x_hat, thresholds, len(block), max_delta)\n if len(cur_pa_list) > 0:\n idx_mask = [x[0] for x in cur_pa_list]\n cur_pa_metrics = [pa_metrics[i] for i in idx_mask]\n else:\n cur_pa_list = pa_list\n cur_pa_metrics = pa_metrics\n else:\n cur_pa_list = pa_list\n cur_pa_metrics = pa_metrics\n log_message += f'\\n{len(cur_pa_list)}/{len(thresholds)} thresholds eligible for max_delta {max_delta}'\n for opt_metric in opt_metrics:\n best_threshold_idx = np.argmin([x[opt_metric] for x in cur_pa_metrics])\n cur_best_metric = cur_pa_metrics[best_threshold_idx][opt_metric]\n\n # Check for failure scenarios\n mean_point_metric = compute_metrics(block[:, :3],\n np.round(np.mean(block[:, :3], axis=0))[np.newaxis, :],\n resolution - 1, p1_n=normals, t1=t1)[opt_metric]\n # In case a single point is better than the network output, this is a failure case\n # Do not output any points\n if cur_best_metric > mean_point_metric:\n best_threshold_idx = max_threshold_idx\n final_idx = best_threshold_idx\n log_message += f', {opt_metric} {final_idx} 0/{len(block)}, metric {cur_best_metric:.2e} > mean point metric {mean_point_metric:.2e}'\n else:\n final_idx = cur_pa_list[best_threshold_idx][0]\n cur_n_points = len(cur_pa_list[best_threshold_idx][1])\n log_message += f', {opt_metric} {final_idx} {cur_n_points}/{len(block)} points (ratio 
{cur_n_points/len(block):.2f}) {cur_best_metric :.2e} < mean point metric {mean_point_metric:.2e}'\n best_thresholds.append(final_idx)\n logger.info(log_message)\n assert len(ret_opt_metrics) == len(best_thresholds)\n\n return ret_opt_metrics, best_thresholds"
] |
[
[
"numpy.mean",
"scipy.spatial.ckdtree.cKDTree",
"numpy.argmin",
"numpy.argwhere"
]
] |
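The `fixed_threshold` branch in `src/model_opt.py` above replaces the per-threshold metric search with a single quantile-style cut: sort the decoded occupancy probabilities and keep the value at position `-len(block)`, so roughly as many voxels survive as the input block had points. A minimal numpy sketch of that idea, not the repository's code; `x_hat` and `n_points` below are made-up stand-ins:

```python
import numpy as np

# Illustration only: pick one occupancy threshold so that about as many voxels
# pass it as the input block had points, mirroring the fixed_threshold branch above.
rng = np.random.default_rng(0)
x_hat = rng.random((16, 16, 16))   # stand-in for decoded occupancy probabilities
n_points = 500                     # stand-in for len(block)

flat = np.sort(x_hat.flatten())
thr = flat[-n_points]              # n_points values are >= thr
points = np.argwhere(x_hat > thr).astype('float32')
print(len(points), float(thr))     # close to n_points (strict > and ties can shave a few off)
```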
Floozutter/silly
|
[
"8273b4a33e2001c0a530e859c12dbc30b9590a94"
] |
[
"python/randmoodplot.py"
] |
[
"\"\"\"\nA plotting companion to randmood.py.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom itertools import islice\nfrom typing import Callable, Iterator\n\n\ndef plot_generator(\n\ttitle: str,\n\tgenf: Callable[[float], Iterator[float]],\n\tn: int,\n\tp: float\n) -> None:\n\tplot(\n\t\ttitle,\n\t\trange(n+1),\n\t\tlist(islice(genf(p), n+1))\n\t)\n\ndef plot_solution(\n\ttitle: str,\n\tf: Callable[[int, float], float],\n\tn: int,\n\tp: float\n) -> None:\n\tplot(\n\t\ttitle,\n\t\trange(n+1),\n\t\t[f(i, p) for i in range(n+1)]\n\t)\n\ndef plot(title: str, x, y) -> None:\n\tfig = plt.figure()\n\tax = fig.add_subplot(1, 1, 1)\n\tax.set_title(title)\n\tax.set_xlabel(\"n (seconds)\")\n\tax.set_ylabel(\"p (probability of happy)\")\n\tax.set_ylim(0, 1)\n\tax.plot(x, y)\n\tfig.show()"
] |
[
[
"matplotlib.pyplot.figure"
]
] |
nisarahamedk/detr-tensorflow
|
[
"aa6f283a6b79796804eaef10f3fb1e417fafd2b3"
] |
[
"detr_tf/networks/transformer.py"
] |
[
"import tensorflow as tf\nfrom tensorflow.keras.layers import Dropout, Activation, LayerNormalization\n\nfrom .custom_layers import Linear\n\n\nclass Transformer(tf.keras.Model):\n def __init__(self, model_dim=256, num_heads=8, num_encoder_layers=6,\n num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,\n activation='relu', normalize_before=False,\n return_intermediate_dec=False, **kwargs):\n super().__init__(**kwargs)\n\n self.model_dim = model_dim\n self.num_heads = num_heads\n\n enc_norm = LayerNormalization(epsilon=1e-5, name='norm_pre') if normalize_before else None\n self.encoder = TransformerEncoder(model_dim, num_heads, dim_feedforward,\n dropout, activation, normalize_before, enc_norm,\n num_encoder_layers, name='encoder')\n\n dec_norm = LayerNormalization(epsilon=1e-5, name='norm')\n self.decoder = TransformerDecoder(model_dim, num_heads, dim_feedforward,\n dropout, activation, normalize_before, dec_norm,\n num_decoder_layers, name='decoder',\n return_intermediate=return_intermediate_dec)\n\n\n def call(self, source, mask, query_encoding, pos_encoding, training=False):\n\n batch_size, rows, cols = [tf.shape(source)[i] for i in range(3)]\n source = tf.reshape(source, [batch_size, -1, self.model_dim])\n source = tf.transpose(source, [1, 0, 2])\n\n\n\n pos_encoding = tf.reshape(pos_encoding, [batch_size, -1, self.model_dim])\n pos_encoding = tf.transpose(pos_encoding, [1, 0, 2])\n\n query_encoding = tf.expand_dims(query_encoding, axis=1)\n query_encoding = tf.tile(query_encoding, [1, batch_size, 1])\n\n mask = tf.reshape(mask, [batch_size, -1])\n\n target = tf.zeros_like(query_encoding)\n\n memory = self.encoder(source, source_key_padding_mask=mask,\n pos_encoding=pos_encoding, training=training)\n hs = self.decoder(target, memory, memory_key_padding_mask=mask,\n pos_encoding=pos_encoding, query_encoding=query_encoding,\n training=training)\n\n hs = tf.transpose(hs, [0, 2, 1, 3])\n memory = tf.transpose(memory, [1, 0, 2])\n memory = tf.reshape(memory, [batch_size, rows, cols, self.model_dim])\n\n return hs, memory\n\n\nclass TransformerEncoder(tf.keras.Model):\n def __init__(self, model_dim=256, num_heads=8, dim_feedforward=2048,\n dropout=0.1, activation='relu', normalize_before=False, norm=None,\n num_encoder_layers=6, **kwargs):\n super().__init__(**kwargs)\n\n self.enc_layers = [EncoderLayer(model_dim, num_heads, dim_feedforward,\n dropout, activation, normalize_before,\n name='layer_%d'%i)\n for i in range(num_encoder_layers)]\n \n self.norm = norm\n\n\n def call(self, source, mask=None, source_key_padding_mask=None,\n pos_encoding=None, training=False):\n x = source\n\n\n for layer in self.enc_layers:\n x = layer(x, source_mask=mask, source_key_padding_mask=source_key_padding_mask,\n pos_encoding=pos_encoding, training=training)\n\n if self.norm:\n x = self.norm(x)\n\n return x\n\n\nclass TransformerDecoder(tf.keras.Model):\n def __init__(self, model_dim=256, num_heads=8, dim_feedforward=2048,\n dropout=0.1, activation='relu', normalize_before=False, norm=None,\n num_decoder_layers=6, return_intermediate=False, **kwargs):\n super().__init__(**kwargs)\n\n self.dec_layers = [DecoderLayer(model_dim, num_heads, dim_feedforward,\n dropout, activation, normalize_before,\n name='layer_%d'%i)\n for i in range(num_decoder_layers)]\n\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n\n def call(self, target, memory, target_mask=None, memory_mask=None,\n target_key_padding_mask=None, memory_key_padding_mask=None,\n pos_encoding=None, query_encoding=None, 
training=False):\n\n x = target\n intermediate = []\n\n\n for layer in self.dec_layers:\n x = layer(x, memory,\n target_mask=target_mask,\n memory_mask=memory_mask,\n target_key_padding_mask=target_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos_encoding=pos_encoding,\n query_encoding=query_encoding)\n\n if self.return_intermediate:\n if self.norm:\n intermediate.append(self.norm(x))\n else:\n intermediate.append(x)\n\n if self.return_intermediate:\n return tf.stack(intermediate, axis=0)\n\n if self.norm:\n x = self.norm(x)\n\n return x\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n def __init__(self, model_dim=256, num_heads=8, dim_feedforward=2048,\n dropout=0.1, activation='relu', normalize_before=False,\n **kwargs):\n super().__init__(**kwargs)\n\n self.self_attn = MultiHeadAttention(model_dim, num_heads, dropout=dropout,\n name='self_attn')\n\n self.dropout = Dropout(dropout)\n self.activation = Activation(activation)\n\n self.linear1 = Linear(dim_feedforward, name='linear1')\n self.linear2 = Linear(model_dim, name='linear2')\n\n self.norm1 = LayerNormalization(epsilon=1e-5, name='norm1')\n self.norm2 = LayerNormalization(epsilon=1e-5, name='norm2')\n\n self.normalize_before = normalize_before\n\n\n def call(self, source, source_mask=None, source_key_padding_mask=None,\n pos_encoding=None, training=False):\n\n\n if pos_encoding is None:\n query = key = source\n else:\n query = key = source + pos_encoding\n\n attn_source = self.self_attn((query, key, source), attn_mask=source_mask,\n key_padding_mask=source_key_padding_mask,\n need_weights=False)\n source += self.dropout(attn_source, training=training)\n source = self.norm1(source)\n\n x = self.linear1(source)\n x = self.activation(x)\n x = self.dropout(x, training=training)\n x = self.linear2(x)\n source += self.dropout(x, training=training)\n source = self.norm2(source)\n \n return source\n\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n def __init__(self, model_dim=256, num_heads=8, dim_feedforward=2048,\n dropout=0.1, activation='relu', normalize_before=False,\n **kwargs):\n super().__init__(**kwargs)\n\n self.self_attn = MultiHeadAttention(model_dim, num_heads, dropout=dropout,\n name='self_attn')\n self.multihead_attn = MultiHeadAttention(model_dim, num_heads, dropout=dropout,\n name='multihead_attn')\n\n self.dropout = Dropout(dropout)\n self.activation = Activation(activation)\n\n self.linear1 = Linear(dim_feedforward, name='linear1')\n self.linear2 = Linear(model_dim, name='linear2')\n\n self.norm1 = LayerNormalization(epsilon=1e-5, name='norm1')\n self.norm2 = LayerNormalization(epsilon=1e-5, name='norm2')\n self.norm3 = LayerNormalization(epsilon=1e-5, name='norm3')\n\n self.normalize_before = normalize_before\n\n\n def call(self, target, memory, target_mask=None, memory_mask=None,\n target_key_padding_mask=None, memory_key_padding_mask=None,\n pos_encoding=None, query_encoding=None, training=False):\n\n query_tgt = key_tgt = target + query_encoding\n attn_target = self.self_attn((query_tgt, key_tgt, target), attn_mask=target_mask,\n key_padding_mask=target_key_padding_mask,\n need_weights=False)\n target += self.dropout(attn_target, training=training)\n target = self.norm1(target)\n\n query_tgt = target + query_encoding\n key_mem = memory + pos_encoding\n \n attn_target2 = self.multihead_attn((query_tgt, key_mem, memory), attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask,\n need_weights=False)\n target += self.dropout(attn_target2, training=training)\n target = 
self.norm2(target)\n\n x = self.linear1(target)\n x = self.activation(x)\n x = self.dropout(x, training=training)\n x = self.linear2(x)\n target += self.dropout(x, training=training)\n target = self.norm3(target)\n \n return target\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, model_dim, num_heads, dropout=0.0, **kwargs):\n super().__init__(**kwargs)\n\n self.model_dim = model_dim\n self.num_heads = num_heads\n\n assert model_dim % num_heads == 0\n self.head_dim = model_dim // num_heads\n\n self.dropout = Dropout(rate=dropout)\n \n\n def build(self, input_shapes):\n in_dim = sum([shape[-1] for shape in input_shapes[:3]])\n\n self.in_proj_weight = self.add_weight(\n name='in_proj_kernel', shape=(in_dim, self.model_dim),\n initializer=tf.keras.initializers.GlorotUniform(), dtype=tf.float32, trainable=True\n )\n self.in_proj_bias = self.add_weight(\n name='in_proj_bias', shape=(in_dim,),\n initializer=tf.keras.initializers.GlorotUniform(), dtype=tf.float32, trainable=True\n )\n self.out_proj_weight = self.add_weight(\n name='out_proj_kernel', shape=(self.model_dim, self.model_dim),\n initializer=tf.keras.initializers.GlorotUniform(), dtype=tf.float32, trainable=True\n )\n self.out_proj_bias = self.add_weight(\n name='out_proj_bias', shape=(self.model_dim,),\n initializer=tf.keras.initializers.GlorotUniform(), dtype=tf.float32, trainable=True\n )\n\n\n\n\n #self.in_proj_weight = tf.Variable(\n # tf.zeros((in_dim, self.model_dim), dtype=tf.float32), name='in_proj_kernel')\n #self.in_proj_bias = tf.Variable(tf.zeros((in_dim,), dtype=tf.float32),\n # name='in_proj_bias')\n\n #self.out_proj_weight = tf.Variable(\n # tf.zeros((self.model_dim, self.model_dim), dtype=tf.float32), name='out_proj_kernel')\n #self.out_proj_bias = tf.Variable(\n # tf.zeros((self.model_dim,), dtype=tf.float32), name='out_proj_bias')\n\n\n\n def call(self, inputs, attn_mask=None, key_padding_mask=None,\n need_weights=True, training=False):\n\n query, key, value = inputs\n\n batch_size = tf.shape(query)[1]\n target_len = tf.shape(query)[0]\n source_len = tf.shape(key)[0]\n\n W = self.in_proj_weight[:self.model_dim, :]\n b = self.in_proj_bias[:self.model_dim]\n\n WQ = tf.matmul(query, W, transpose_b=True) + b\n\n W = self.in_proj_weight[self.model_dim:2*self.model_dim, :]\n b = self.in_proj_bias[self.model_dim:2*self.model_dim]\n WK = tf.matmul(key, W, transpose_b=True) + b\n\n W = self.in_proj_weight[2*self.model_dim:, :]\n b = self.in_proj_bias[2*self.model_dim:]\n WV = tf.matmul(value, W, transpose_b=True) + b\n\n WQ *= float(self.head_dim) ** -0.5\n WQ = tf.reshape(WQ, [target_len, batch_size * self.num_heads, self.head_dim])\n WQ = tf.transpose(WQ, [1, 0, 2])\n \n WK = tf.reshape(WK, [source_len, batch_size * self.num_heads, self.head_dim])\n WK = tf.transpose(WK, [1, 0, 2])\n\n WV = tf.reshape(WV, [source_len, batch_size * self.num_heads, self.head_dim])\n WV = tf.transpose(WV, [1, 0, 2])\n \n attn_output_weights = tf.matmul(WQ, WK, transpose_b=True)\n\n if attn_mask is not None:\n attn_output_weights += attn_mask\n\n \"\"\"\n if key_padding_mask is not None:\n attn_output_weights = tf.reshape(attn_output_weights,\n [batch_size, self.num_heads, target_len, source_len])\n\n key_padding_mask = tf.expand_dims(key_padding_mask, 1)\n key_padding_mask = tf.expand_dims(key_padding_mask, 2)\n key_padding_mask = tf.tile(key_padding_mask, [1, self.num_heads, target_len, 1])\n\n #print(\"before attn_output_weights\", attn_output_weights.shape)\n attn_output_weights = tf.where(key_padding_mask,\n 
tf.zeros_like(attn_output_weights) + float('-inf'),\n                                           attn_output_weights)\n            attn_output_weights = tf.reshape(attn_output_weights,\n                                             [batch_size * self.num_heads, target_len, source_len])\n        \"\"\"\n\n\n        attn_output_weights = tf.nn.softmax(attn_output_weights, axis=-1)\n        attn_output_weights = self.dropout(attn_output_weights, training=training)\n\n        attn_output = tf.matmul(attn_output_weights, WV)\n        attn_output = tf.transpose(attn_output, [1, 0, 2])\n        attn_output = tf.reshape(attn_output, [target_len, batch_size, self.model_dim])\n        attn_output = tf.matmul(attn_output, self.out_proj_weight,\n                                transpose_b=True) + self.out_proj_bias\n\n        if need_weights:\n            attn_output_weights = tf.reshape(attn_output_weights,\n                                             [batch_size, self.num_heads, target_len, source_len])\n            # Return the average weight over the heads\n            avg_weights = tf.reduce_mean(attn_output_weights, axis=1)\n            return attn_output, avg_weights\n        \n        return attn_output\n"
] |
[
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.keras.layers.Activation",
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.initializers.GlorotUniform",
"tensorflow.tile"
]
] |
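Once the Q/K/V projections are done, `MultiHeadAttention.call` in `detr_tf/networks/transformer.py` above reduces to scaled dot-product attention: scale the queries by `head_dim ** -0.5`, softmax the query-key products, and take a weighted sum of the values. A minimal TensorFlow sketch of just that core, with toy shapes chosen for illustration; none of these tensor names come from the repository:

```python
import tensorflow as tf

head_dim = 8
q = tf.random.normal([2, 4, head_dim])   # (batch*heads, target_len, head_dim)
k = tf.random.normal([2, 4, head_dim])   # (batch*heads, source_len, head_dim)
v = tf.random.normal([2, 4, head_dim])

q = q * float(head_dim) ** -0.5                                      # same scaling as WQ above
weights = tf.nn.softmax(tf.matmul(q, k, transpose_b=True), axis=-1)  # attention weights
out = tf.matmul(weights, v)                                          # weighted sum of values
print(out.shape, weights.shape)                                      # (2, 4, 8) (2, 4, 4)
```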
zhangkunliang/BayesOptimization
|
[
"6d78c9e9f96239b0dbb85650a0d878e9410158ec"
] |
[
"examples/grain_bound/bayesOptimization/interactive/bo_mao_interactive.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\nfrom __future__ import print_function\n\nimport numpy as np\nimport pickle as pickle\nimport scipy\nimport combo\nimport os\nimport urllib\nimport matplotlib.pyplot as plt\n\nnum = 0\n\n\ndef load_data():\n    A = np.loadtxt('descriptor.dat')\n    print(A.shape)\n    X = A\n    print(X.shape[0])\n    return X\n\n\nlog = open('../bo_log', 'w')\n\n# Load the Cnb_test_data.\n# X is the N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of search candidate. \n# t is the N-dimensional vector that represents the corresponding negative energy of search candidates. \n# ( It is of course unknown in practice. )\nX = load_data()\n\n\n# Normalize the mean and standard deviation along each column of X to 0 and 1, respectively\n# X = combo.misc.centering( X )\n\n# Declare the class for calling the simulator. \n# In this tutorial, we simply refer to the value of t. \n# If you want to apply combo to other problems, you have to customize this class. \nclass simulator:\n\n    def __init__(self):\n        print('Call simulator')\n        self.t = np.zeros(X.shape[0])\n        # self.t = np.loadtxt('mao_03.out')\n        print('Hello!')\n\n    # __call__ method: lets a class instance be invoked like an ordinary function, i.e. as object_name()\n    def __call__(self, action):\n        global num\n        num = num + 1\n        print(num)\n        if num <= 26:\n            print(action + 1)\n            structure_current = X[action, :]\n            np.savetxt('input_Descriptor_1st.dat', action + 1, fmt='%d')\n            os.system('ifort -o F90_ReadThermalConductance_For_Combo.exe F90_ReadThermalConductance_For_Combo.f90')\n            os.system('./F90_ReadThermalConductance_For_Combo.exe>out.f90')\n            os.system('mv out.f90 ./out_' + str(num) + '.f90')\n            etot = np.loadtxt('G_300_01.dat')\n            os.system('mv G_300_01.dat ./G_300_01_' + str(num) + '.dat')\n            self.t[action] = -etot\n            log.write(str(action + 1))\n            log.write('\\n')\n            log.flush()\n            print(X[action, :])\n            print(etot)\n            np.savetxt('mao_03.out', self.t, fmt='%1.4f')\n            return self.t[action]\n\n        else:\n            print(action + 1)\n            structure_current = X[action, :]\n            np.savetxt('input_Descriptor_1st.dat', action + 1, fmt='%d')\n            log.write(str(action + 1))\n            log.write('\\n')\n            log.flush()\n            print(X[action, :])\n            np.savetxt('mao_03.out', self.t, fmt='%1.4f')\n            return self.t[action]\n\n\n# Design of policy\n\n# Declaring the policy by \npolicy = combo.search.discrete.policy(test_X=X)\n# test_X is the set of candidates which is represented by numpy.array.\n# Each row vector represents the feature vector of the corresponding candidate\n\n# set the seed parameter \npolicy.set_seed(10)\n\nsimulator = simulator()\n\n''' 1st step (random sampling) '''\nactions = policy.random_search(max_num_probes=1, num_search_each_probe=10, simulator=None)\nt = simulator(actions)\npolicy.write(actions, t)\ncombo.search.utility.show_search_results(policy.history, 10)\n\n''' 2nd step (random sampling) '''\nactions = policy.random_search(max_num_probes=1, num_search_each_probe=10, simulator=None)\nt = simulator(actions)\npolicy.write(actions, t)\ncombo.search.utility.show_search_results(policy.history, 10)\n\n''' 3rd step (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10,\n                              simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions)  # experiment\npolicy.write(actions, t)  # record new observations\ncombo.search.utility.show_search_results(policy.history, 10)  # describe search results\n\npredictor = policy.predictor\ntraining = policy.training\n\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10,\n                              predictor=predictor, 
training=training,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search results\n\n''' 4-th step (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search results\n\npredictor = policy.predictor\ntraining = policy.training\n\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10,\n predictor=predictor, training=training,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search results\n\nwith open('../predictor.dump', 'w') as f:\n pickle.dump(policy.predictor, f)\npolicy.training.save('training.npz')\npolicy.history.save('history.npz')\n\n''' delete policy'''\ndel policy\n\npolicy = combo.search.discrete.policy(test_X=X)\npolicy.load('history.npz', 'training.npz', 'predictor.dump')\n\n''' 5-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 6-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 7-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 8-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 9-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 10-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new 
observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 11-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 12-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 13-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 14-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 15-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 16-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 17-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 18-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 19-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 20-th probe (bayesian optimization) 
'''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 21-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 22-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 23-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 24-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n''' 25-th probe (bayesian optimization) '''\nactions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10, predictor=predictor,\n simulator=None, score='EI', interval=0, num_rand_basis=0)\nt = simulator(actions) # experiment\npolicy.write(actions, t) # record new observations\ncombo.search.utility.show_search_results(policy.history, 10) # describe search result\n\n# res=policy.history\n\n# plt.plot(res.fx[0:res.total_num_search])\n\n# plt.savefig('Cnb_test.png', dpi = 300)\n"
] |
[
[
"numpy.savetxt",
"numpy.zeros",
"numpy.loadtxt"
]
] |
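The script in `bo_mao_interactive.py` above copy-pastes the same four-line probe block for the 5th through 25th probes with only the comment changing. Using only the combo calls that already appear in the record, the repetition could be collapsed into a loop. This is a sketch, not the repository's code, and it assumes `policy`, `predictor`, `simulator`, and `combo` are already set up exactly as in the script:

```python
# Sketch: replaces the repeated "N-th probe (bayesian optimization)" blocks above.
for probe in range(5, 26):
    actions = policy.bayes_search(max_num_probes=1, num_search_each_probe=10,
                                  predictor=predictor, simulator=None,
                                  score='EI', interval=0, num_rand_basis=0)
    t = simulator(actions)                                        # experiment
    policy.write(actions, t)                                      # record new observations
    combo.search.utility.show_search_results(policy.history, 10)  # describe search result
```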
xzy256/tensorflow-mips
|
[
"f45787d2f30e8109a67f7fa5a586167a71489d21"
] |
[
"tensorflow/tensorboard/backend/application_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for TensorBoard.\n\nThese tests start up a full-fledged TensorBoard server.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport json\nimport numbers\nimport os\nimport shutil\nimport socket\nimport tempfile\nimport threading\n\nfrom six import BytesIO\nfrom six.moves import http_client\nimport tensorflow as tf\n\nfrom werkzeug import serving\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.tensorboard import tensorboard\nfrom tensorflow.tensorboard.backend import application\nfrom tensorflow.tensorboard.backend.event_processing import event_multiplexer\nfrom tensorflow.tensorboard.plugins import base_plugin\n\n\nclass FakePlugin(base_plugin.TBPlugin):\n \"\"\"A plugin with no functionality.\"\"\"\n\n def __init__(self, plugin_name, is_active_value, routes_mapping):\n \"\"\"Constructs a fake plugin.\n\n Args:\n plugin_name: The name of this plugin.\n is_active_value: Whether the plugin is active.\n routes_mapping: A dictionary mapping from route (string URL path) to the\n method called when a user issues a request to that route.\n \"\"\"\n self.plugin_name = plugin_name\n self._is_active_value = is_active_value\n self._routes_mapping = routes_mapping\n\n def get_plugin_apps(self, multiplexer, logdir):\n \"\"\"Returns a mapping from routes to handlers offered by this plugin.\n\n Args:\n multiplexer: The event multiplexer.\n logdir: The path to the directory containing logs.\n\n Returns:\n A dictionary mapping from routes to handlers offered by this plugin.\n \"\"\"\n return self._routes_mapping\n\n def is_active(self):\n \"\"\"Returns whether this plugin is active.\n\n Returns:\n A boolean. Whether this plugin is active.\n \"\"\"\n return self._is_active_value\n\n\nclass TensorboardServerTest(tf.test.TestCase):\n _only_use_meta_graph = False # Server data contains only a GraphDef\n\n def setUp(self):\n self.temp_dir = self._GenerateTestData()\n multiplexer = event_multiplexer.EventMultiplexer(\n size_guidance=application.DEFAULT_SIZE_GUIDANCE,\n purge_orphaned_data=True)\n plugins = [\n FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),\n FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})\n ]\n app = application.TensorBoardWSGIApp(\n self.temp_dir, plugins, multiplexer, reload_interval=0)\n try:\n self._server = serving.BaseWSGIServer('localhost', 0, app)\n # 0 to pick an unused port.\n except IOError:\n # BaseWSGIServer has a preference for IPv4. 
If that didn't work, try again\n # with an explicit IPv6 address.\n self._server = serving.BaseWSGIServer('::1', 0, app)\n self._server_thread = threading.Thread(target=self._server.serve_forever)\n self._server_thread.daemon = True\n self._server_thread.start()\n self._connection = http_client.HTTPConnection(\n 'localhost', self._server.server_address[1])\n\n def tearDown(self):\n self._connection.close()\n self._server.shutdown()\n self._server.server_close()\n\n def _get(self, path, headers=None):\n \"\"\"Perform a GET request for the given path.\"\"\"\n if headers is None:\n headers = {}\n self._connection.request('GET', path, None, headers)\n return self._connection.getresponse()\n\n def _getJson(self, path):\n \"\"\"Perform a GET request and decode the result as JSON.\"\"\"\n self._connection.request('GET', path)\n response = self._connection.getresponse()\n self.assertEqual(response.status, 200)\n data = response.read()\n if response.getheader('Content-Encoding') == 'gzip':\n data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()\n return json.loads(data.decode('utf-8'))\n\n def testBasicStartup(self):\n \"\"\"Start the server up and then shut it down immediately.\"\"\"\n pass\n\n def testRequestMainPage(self):\n \"\"\"Navigate to the main page and verify that it returns a 200.\"\"\"\n response = self._get('/')\n self.assertEqual(response.status, 200)\n\n def testRequestNonexistentPage(self):\n \"\"\"Request a page that doesn't exist; it should 404.\"\"\"\n response = self._get('/asdf')\n self.assertEqual(response.status, 404)\n\n def testLogdir(self):\n \"\"\"Test the format of the data/logdir endpoint.\"\"\"\n parsed_object = self._getJson('/data/logdir')\n self.assertEqual(parsed_object, {'logdir': self.temp_dir})\n\n def testPluginsListing(self):\n \"\"\"Test the format of the data/plugins_listing endpoint.\"\"\"\n parsed_object = self._getJson('/data/plugins_listing')\n # Plugin foo is active. 
Plugin bar is not.\n self.assertEqual(parsed_object, {'foo': True, 'bar': False})\n\n def testRuns(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n run_json = self._getJson('/data/runs')\n\n # Don't check the actual timestamp since it's time-dependent.\n self.assertTrue(\n isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))\n del run_json['run1']['firstEventTimestamp']\n self.assertEqual(\n run_json,\n {\n 'run1': {\n 'compressedHistograms': ['histogram'],\n 'audio': ['audio'],\n # if only_use_meta_graph, the graph is from the metagraph\n 'graph': True,\n 'meta_graph': self._only_use_meta_graph,\n 'run_metadata': ['test run'],\n 'tensors': [],\n }\n })\n\n def testApplicationPaths_getCached(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in ('/',): # TODO(jart): '/app.js' in open source\n connection = http_client.HTTPConnection('localhost',\n self._server.server_address[1])\n connection.request('GET', path)\n response = connection.getresponse()\n self.assertEqual(response.status, 200, msg=path)\n self.assertEqual(\n response.getheader('Cache-Control'),\n 'private, max-age=3600',\n msg=path)\n connection.close()\n\n def testDataPaths_disableAllCaching(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in ('/data/runs', '/data/logdir', '/data/audio?run=run1&tag=audio',\n '/data/run_metadata?run=run1&tag=test%20run'):\n connection = http_client.HTTPConnection('localhost',\n self._server.server_address[1])\n connection.request('GET', path)\n response = connection.getresponse()\n self.assertEqual(response.status, 200, msg=path)\n self.assertEqual(response.getheader('Expires'), '0', msg=path)\n response.read()\n connection.close()\n\n def testAudio(self):\n \"\"\"Test listing audio and retrieving an individual audio clip.\"\"\"\n audio_json = self._getJson('/data/audio?tag=audio&run=run1')\n audio_query = audio_json[0]['query']\n # We don't care about the format of the audio query.\n del audio_json[0]['query']\n self.assertEqual(audio_json, [{\n 'wall_time': 0,\n 'step': 0,\n 'content_type': 'audio/wav'\n }])\n response = self._get('/data/individualAudio?%s' % audio_query)\n self.assertEqual(response.status, 200)\n\n def testGraph(self):\n \"\"\"Test retrieving the graph definition.\"\"\"\n response = self._get('/data/graph?run=run1&limit_attr_size=1024'\n '&large_attrs_key=_very_large_attrs')\n self.assertEqual(response.status, 200)\n graph_pbtxt = response.read()\n # Parse the graph from pbtxt into a graph message.\n graph = tf.GraphDef()\n graph = text_format.Parse(graph_pbtxt, graph)\n self.assertEqual(len(graph.node), 2)\n self.assertEqual(graph.node[0].name, 'a')\n self.assertEqual(graph.node[1].name, 'b')\n # Make sure the second node has an attribute that was filtered out because\n # it was too large and was added to the \"too large\" attributes list.\n self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])\n self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,\n [b'very_large_attr'])\n\n def testAcceptGzip_compressesResponse(self):\n response = self._get('/data/graph?run=run1&limit_attr_size=1024'\n '&large_attrs_key=_very_large_attrs',\n {'Accept-Encoding': 'gzip'})\n self.assertEqual(response.status, 200)\n self.assertEqual(response.getheader('Content-Encoding'), 'gzip')\n pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()\n graph = text_format.Parse(pbtxt, tf.GraphDef())\n self.assertEqual(len(graph.node), 2)\n\n def 
testAcceptAnyEncoding_compressesResponse(self):\n response = self._get('/data/graph?run=run1&limit_attr_size=1024'\n '&large_attrs_key=_very_large_attrs',\n {'Accept-Encoding': '*'})\n self.assertEqual(response.status, 200)\n self.assertEqual(response.getheader('Content-Encoding'), 'gzip')\n pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()\n graph = text_format.Parse(pbtxt, tf.GraphDef())\n self.assertEqual(len(graph.node), 2)\n\n def testAcceptDoodleEncoding_doesNotCompressResponse(self):\n response = self._get('/data/graph?run=run1&limit_attr_size=1024'\n '&large_attrs_key=_very_large_attrs',\n {'Accept-Encoding': 'doodle'})\n self.assertEqual(response.status, 200)\n self.assertIsNone(response.getheader('Content-Encoding'))\n graph = text_format.Parse(response.read(), tf.GraphDef())\n self.assertEqual(len(graph.node), 2)\n\n def testRunMetadata(self):\n \"\"\"Test retrieving the run metadata information.\"\"\"\n response = self._get('/data/run_metadata?run=run1&tag=test%20run')\n self.assertEqual(response.status, 200)\n run_metadata_pbtxt = response.read()\n # Parse from pbtxt into a message.\n run_metadata = tf.RunMetadata()\n text_format.Parse(run_metadata_pbtxt, run_metadata)\n self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)\n self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')\n\n def _GenerateTestData(self):\n \"\"\"Generates the test data directory.\n\n The test data has a single run named run1 which contains:\n - a histogram [1]\n - a graph definition\n\n [1]: Histograms no longer appear in `/runs`, but compressed\n histograms do, and they use the same test data. Thus, histograms are\n still here for now.\n\n Returns:\n temp_dir: The directory the test data is generated under.\n \"\"\"\n temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())\n self.addCleanup(shutil.rmtree, temp_dir)\n run1_path = os.path.join(temp_dir, 'run1')\n os.makedirs(run1_path)\n writer = tf.summary.FileWriter(run1_path)\n\n histogram_value = tf.HistogramProto(\n min=0,\n max=2,\n num=3,\n sum=6,\n sum_squares=5,\n bucket_limit=[0, 1, 2],\n bucket=[1, 1, 1])\n # Add a simple graph event.\n graph_def = tf.GraphDef()\n node1 = graph_def.node.add()\n node1.name = 'a'\n node2 = graph_def.node.add()\n node2.name = 'b'\n node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute\n\n meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)\n\n if self._only_use_meta_graph:\n writer.add_meta_graph(meta_graph_def)\n else:\n writer.add_graph(graph_def)\n\n # Add a simple run metadata event.\n run_metadata = tf.RunMetadata()\n device_stats = run_metadata.step_stats.dev_stats.add()\n device_stats.device = 'test device'\n writer.add_run_metadata(run_metadata, 'test run')\n\n audio_value = tf.Summary.Audio(\n sample_rate=44100,\n length_frames=22050,\n num_channels=2,\n encoded_audio_string=b'',\n content_type='audio/wav')\n writer.add_event(\n tf.Event(\n wall_time=0,\n step=0,\n summary=tf.Summary(value=[\n tf.Summary.Value(tag='histogram', histo=histogram_value),\n tf.Summary.Value(tag='audio', audio=audio_value)\n ])))\n\n writer.flush()\n writer.close()\n\n return temp_dir\n\n\nclass TensorboardServerPluginNameTest(tf.test.TestCase):\n\n def _test(self, name, should_be_okay):\n temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())\n self.addCleanup(shutil.rmtree, temp_dir)\n multiplexer = event_multiplexer.EventMultiplexer(\n size_guidance=application.DEFAULT_SIZE_GUIDANCE,\n purge_orphaned_data=True)\n plugins = [\n 
FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),\n FakePlugin(plugin_name=name, is_active_value=True, routes_mapping={}),\n FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})\n ]\n if should_be_okay:\n application.TensorBoardWSGIApp(\n temp_dir, plugins, multiplexer, reload_interval=0)\n else:\n with self.assertRaisesRegexp(ValueError, r'invalid name'):\n application.TensorBoardWSGIApp(\n temp_dir, plugins, multiplexer, reload_interval=0)\n\n def testEmptyName(self):\n self._test('', False)\n\n def testNameWithSlashes(self):\n self._test('scalars/data', False)\n\n def testNameWithSpaces(self):\n self._test('my favorite plugin', False)\n\n def testSimpleName(self):\n self._test('scalars', True)\n\n def testComprehensiveName(self):\n self._test('Scalar-Dashboard_3000.1', True)\n\n\nclass TensorboardServerPluginRouteTest(tf.test.TestCase):\n\n def _test(self, route, should_be_okay):\n temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())\n self.addCleanup(shutil.rmtree, temp_dir)\n multiplexer = event_multiplexer.EventMultiplexer(\n size_guidance=application.DEFAULT_SIZE_GUIDANCE,\n purge_orphaned_data=True)\n plugins = [\n FakePlugin(\n plugin_name='foo',\n is_active_value=True,\n routes_mapping={route: lambda environ, start_response: None}),\n ]\n if should_be_okay:\n application.TensorBoardWSGIApp(\n temp_dir, plugins, multiplexer, reload_interval=0)\n else:\n with self.assertRaisesRegexp(ValueError, r'invalid route'):\n application.TensorBoardWSGIApp(\n temp_dir, plugins, multiplexer, reload_interval=0)\n\n def testNormalRoute(self):\n self._test('/runs', True)\n\n def testEmptyRoute(self):\n self._test('', False)\n\n def testSlashlessRoute(self):\n self._test('runaway', False)\n\n\nclass TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):\n # Tests new ability to use only the MetaGraphDef\n _only_use_meta_graph = True # Server data contains only a MetaGraphDef\n\n\nclass ParseEventFilesSpecTest(tf.test.TestCase):\n\n def testRunName(self):\n logdir = 'lol:/cat'\n expected = {'/cat': 'lol'}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):\n logdir = '/lol:/cat'\n expected = {'/lol:/cat': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testMultipleDirectories(self):\n logdir = '/a,/b'\n expected = {'/a': None, '/b': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testNormalizesPaths(self):\n logdir = '/lol/.//cat/../cat'\n expected = {'/lol/cat': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testAbsolutifies(self):\n logdir = 'lol/cat'\n expected = {os.path.realpath('lol/cat'): None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testRespectsGCSPath(self):\n logdir = 'gs://foo/path'\n expected = {'gs://foo/path': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testRespectsHDFSPath(self):\n logdir = 'hdfs://foo/path'\n expected = {'hdfs://foo/path': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testDoesNotExpandUserInGCSPath(self):\n logdir = 'gs://~/foo/path'\n expected = {'gs://~/foo/path': None}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testDoesNotNormalizeGCSPath(self):\n logdir = 'gs://foo/./path//..'\n expected = {'gs://foo/./path//..': None}\n 
self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n def testRunNameWithGCSPath(self):\n logdir = 'lol:gs://foo/path'\n expected = {'gs://foo/path': 'lol'}\n self.assertEqual(application.parse_event_files_spec(logdir), expected)\n\n\nclass TensorBoardAssetsTest(tf.test.TestCase):\n\n def testTagFound(self):\n tag = application.get_tensorboard_tag()\n self.assertTrue(tag)\n app = application.standard_tensorboard_wsgi('', True, 60, [])\n self.assertEqual(app.tag, tag)\n\n\nclass TensorBoardPluginsTest(tf.test.TestCase):\n\n def testPluginsAdded(self):\n\n def foo_handler():\n pass\n\n def bar_handler():\n pass\n\n plugins = [\n FakePlugin(\n plugin_name='foo',\n is_active_value=True,\n routes_mapping={'/foo_route': foo_handler}),\n FakePlugin(\n plugin_name='bar',\n is_active_value=True,\n routes_mapping={'/bar_route': bar_handler}),\n ]\n\n # The application should have added routes for both plugins.\n app = application.standard_tensorboard_wsgi('', True, 60, plugins)\n\n # The routes are prefixed with /data/plugin/[plugin name].\n self.assertDictContainsSubset({\n '/data/plugin/foo/foo_route': foo_handler,\n '/data/plugin/bar/bar_route': bar_handler,\n }, app.data_applications)\n\n\nclass TensorboardSimpleServerConstructionTest(tf.test.TestCase):\n \"\"\"Tests that the default HTTP server is constructed without error.\n\n Mostly useful for IPv4/IPv6 testing. This test should run with only IPv4, only\n IPv6, and both IPv4 and IPv6 enabled.\n \"\"\"\n\n class _StubApplication(object):\n tag = ''\n\n def testMakeServerBlankHost(self):\n # Test that we can bind to all interfaces without throwing an error\n server, url = tensorboard.make_simple_server(\n self._StubApplication(),\n host='',\n port=0) # Grab any available port\n self.assertTrue(server)\n self.assertTrue(url)\n\n def testSpecifiedHost(self):\n one_passed = False\n try:\n _, url = tensorboard.make_simple_server(\n self._StubApplication(),\n host='127.0.0.1',\n port=0)\n self.assertStartsWith(actual=url, expected_start='http://127.0.0.1:')\n one_passed = True\n except socket.error:\n # IPv4 is not supported\n pass\n try:\n _, url = tensorboard.make_simple_server(\n self._StubApplication(),\n host='::1',\n port=0)\n self.assertStartsWith(actual=url, expected_start='http://[::1]:')\n one_passed = True\n except socket.error:\n # IPv6 is not supported\n pass\n self.assertTrue(one_passed) # We expect either IPv4 or IPv6 to be supported\n\n\nclass TensorBoardApplcationConstructionTest(tf.test.TestCase):\n\n def testExceptions(self):\n logdir = '/fake/foo'\n multiplexer = event_multiplexer.EventMultiplexer()\n\n # Fails if there is an unnamed plugin\n with self.assertRaises(ValueError):\n # This plugin lacks a name.\n plugins = [\n FakePlugin(plugin_name=None, is_active_value=True, routes_mapping={})\n ]\n application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)\n\n # Fails if there are two plugins with same name\n with self.assertRaises(ValueError):\n plugins = [\n FakePlugin(\n plugin_name='foo', is_active_value=True, routes_mapping={}),\n FakePlugin(\n plugin_name='foo', is_active_value=True, routes_mapping={}),\n ]\n application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef",
"tensorflow.tensorboard.backend.event_processing.event_multiplexer.EventMultiplexer",
"tensorflow.Summary.Audio",
"tensorflow.summary.FileWriter",
"tensorflow.tensorboard.backend.application.get_tensorboard_tag",
"tensorflow.tensorboard.backend.application.TensorBoardWSGIApp",
"tensorflow.tensorboard.backend.application.standard_tensorboard_wsgi",
"tensorflow.RunMetadata",
"tensorflow.test.main",
"tensorflow.tensorboard.backend.application.parse_event_files_spec",
"tensorflow.Summary.Value",
"tensorflow.HistogramProto",
"tensorflow.GraphDef"
]
] |
iabhibits/QANet_SQUAD_2.0
|
[
"760f1147038411beb205401ae902d6ec3ac4468d"
] |
[
"Baseline/train.py"
] |
[
"\"\"\"Train a model on SQuAD.\n\nAuthor:\n Chris Chute ([email protected])\n\"\"\"\n\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as sched\nimport torch.utils.data as data\nimport util\n\nfrom args import get_train_args\nfrom collections import OrderedDict\nfrom json import dumps\nfrom models import BiDAF\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nfrom ujson import load as json_load\nfrom util import collate_fn, SQuAD\n\n\ndef main(args):\n # Set up logging and devices\n args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)\n log = util.get_logger(args.save_dir, args.name)\n tbx = SummaryWriter(args.save_dir)\n device, args.gpu_ids = util.get_available_devices()\n log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))\n args.batch_size *= max(1, len(args.gpu_ids))\n\n # Set random seed\n log.info('Using random seed {}...'.format(args.seed))\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n # Get embeddings\n log.info('Loading embeddings...')\n word_vectors = util.torch_from_json(args.word_emb_file)\n\n # Get model\n log.info('Building model...')\n model = BiDAF(word_vectors=word_vectors,\n hidden_size=args.hidden_size,\n drop_prob=args.drop_prob)\n model = nn.DataParallel(model, args.gpu_ids)\n if args.load_path:\n log.info('Loading checkpoint from {}...'.format(args.load_path))\n model, step = util.load_model(model, args.load_path, args.gpu_ids)\n else:\n step = 0\n model = model.to(device)\n model.train()\n ema = util.EMA(model, args.ema_decay)\n\n # Get saver\n saver = util.CheckpointSaver(args.save_dir,\n max_checkpoints=args.max_checkpoints,\n metric_name=args.metric_name,\n maximize_metric=args.maximize_metric,\n log=log)\n\n # Get optimizer and scheduler\n optimizer = optim.Adadelta(model.parameters(), args.lr,\n weight_decay=args.l2_wd)\n scheduler = sched.LambdaLR(optimizer, lambda s: 1.) 
# Constant LR\n\n # Get data loader\n log.info('Building dataset...')\n train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)\n train_loader = data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)\n dev_loader = data.DataLoader(dev_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n # Train\n log.info('Training...')\n steps_till_eval = args.eval_steps\n epoch = step // len(train_dataset)\n while epoch != args.num_epochs:\n epoch += 1\n log.info('Starting epoch {}...'.format(epoch))\n with torch.enable_grad(), \\\n tqdm(total=len(train_loader.dataset)) as progress_bar:\n for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:\n # Setup for forward\n cw_idxs = cw_idxs.to(device)\n qw_idxs = qw_idxs.to(device)\n batch_size = cw_idxs.size(0)\n optimizer.zero_grad()\n\n # Forward\n log_p1, log_p2 = model(cw_idxs, qw_idxs)\n y1, y2 = y1.to(device), y2.to(device)\n loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)\n loss_val = loss.item()\n\n # Backward\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step(step // batch_size)\n ema(model, step // batch_size)\n\n # Log info\n step += batch_size\n progress_bar.update(batch_size)\n progress_bar.set_postfix(epoch=epoch,\n NLL=loss_val)\n tbx.add_scalar('train/NLL', loss_val, step)\n tbx.add_scalar('train/LR',\n optimizer.param_groups[0]['lr'],\n step)\n\n steps_till_eval -= batch_size\n if steps_till_eval <= 0:\n steps_till_eval = args.eval_steps\n\n # Evaluate and save checkpoint\n log.info('Evaluating at step {}...'.format(step))\n ema.assign(model)\n results, pred_dict = evaluate(model, dev_loader, device,\n args.dev_eval_file,\n args.max_ans_len,\n args.use_squad_v2)\n saver.save(step, model, results[args.metric_name], device)\n ema.resume(model)\n\n # Log to console\n results_str = ', '.join('{}: {:05.2f}'.format(k, v)\n for k, v in results.items())\n log.info('Dev {}'.format(results_str))\n\n # Log to TensorBoard\n log.info('Visualizing in TensorBoard...')\n for k, v in results.items():\n tbx.add_scalar('dev/{}'.format(k), v, step)\n util.visualize(tbx,\n pred_dict=pred_dict,\n eval_path=args.dev_eval_file,\n step=step,\n split='dev',\n num_visuals=args.num_visuals)\n\n\ndef evaluate(model, data_loader, device, eval_file, max_len, use_squad_v2):\n nll_meter = util.AverageMeter()\n\n model.eval()\n pred_dict = {}\n with open(eval_file, 'r') as fh:\n gold_dict = json_load(fh)\n with torch.no_grad(), \\\n tqdm(total=len(data_loader.dataset)) as progress_bar:\n for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:\n # Setup for forward\n cw_idxs = cw_idxs.to(device)\n qw_idxs = qw_idxs.to(device)\n batch_size = cw_idxs.size(0)\n\n # Forward\n log_p1, log_p2 = model(cw_idxs, qw_idxs)\n y1, y2 = y1.to(device), y2.to(device)\n loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)\n nll_meter.update(loss.item(), batch_size)\n\n # Get F1 and EM scores\n p1, p2 = log_p1.exp(), log_p2.exp()\n starts, ends = util.discretize(p1, p2, max_len, use_squad_v2)\n\n # Log info\n progress_bar.update(batch_size)\n progress_bar.set_postfix(NLL=nll_meter.avg)\n\n preds, _ = util.convert_tokens(gold_dict,\n ids.tolist(),\n starts.tolist(),\n ends.tolist(),\n use_squad_v2)\n pred_dict.update(preds)\n\n model.train()\n\n results = util.eval_dicts(gold_dict, 
pred_dict, use_squad_v2)\n results_list = [('NLL', nll_meter.avg),\n ('F1', results['F1']),\n ('EM', results['EM'])]\n if use_squad_v2:\n results_list.append(('AvNA', results['AvNA']))\n results = OrderedDict(results_list)\n\n return results, pred_dict\n\n\nif __name__ == '__main__':\n main(get_train_args())\n"
] |
[
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.enable_grad",
"numpy.random.seed",
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.nn.DataParallel"
]
] |
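The training loop in `Baseline/train.py` above applies the usual per-batch order: zero the gradients, forward, backward, clip the gradient norm, step the optimizer, then step the scheduler and EMA bookkeeping. A self-contained PyTorch sketch of that ordering with a toy model; the shapes, loss, and hyperparameters are placeholders, not the repository's:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as sched

model = nn.Linear(4, 1)                               # toy stand-in for BiDAF
optimizer = optim.Adadelta(model.parameters(), lr=0.5)
scheduler = sched.LambdaLR(optimizer, lambda s: 1.)   # constant LR, as in the record

x, y = torch.randn(8, 4), torch.randn(8, 1)
optimizer.zero_grad()
loss = F.mse_loss(model(x), y)                        # placeholder loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 5.0)     # clip before stepping
optimizer.step()
scheduler.step()
print(loss.item(), optimizer.param_groups[0]['lr'])
```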
phrenico/cmfsapy
|
[
"9eb0b335a3e1f2c4a02fd7edf7b22b78d4ea9931"
] |
[
"src/cmfsapy/dimension/fsa.py"
] |
[
"import numpy as np\nfrom scipy.spatial import cKDTree\nfrom multiprocessing import cpu_count\nfrom scipy.stats import hmean\n\n\ndef get_dists_inds_ck(X, k, boxsize):\n    \"\"\"computes the kNN distances and indices\n\n    :param numpy.ndarray X: 2D array with data shape: (ndata, n_vars)\n    :param int k: neighborhood size\n    :param float boxsize: circular boundary condition to [0, boxsize] interval for all input dimensions if not None.\n    :return: kNN distances and indices\n    \"\"\"\n    tree = cKDTree(X, boxsize=boxsize)\n    dists, inds = tree.query(X, k + 1, n_jobs=cpu_count())\n    return dists, inds\n\ndef szepesvari_dimensionality(dists):\n    \"\"\"Compute Szepesvari dimensions from kNN distances\n\n    :param dists:\n    :return:\n    \"\"\"\n    n = dists.shape[1]\n    lower_k = np.arange(np.ceil(n / 2)).astype(int)\n    upper_k = np.arange(n)[::2]\n    d = - np.log(2) / np.log(dists[:, lower_k] / dists[:, upper_k])\n    return d\n\ndef fsa(X, k, boxsize=None):\n    \"\"\"Measure local Szepesvari-Farahmand dimension; distances are computed by the cKDTree algorithm\n\n    :param arraylike X: data series [n x dim] shape\n    :param k: maximum k value\n    :param boxsize: apply d-toroidal distance computation with edge-size = boxsize, see cKDTree class for more\n    :return: local estimates, distances, indices\n    \"\"\"\n    dists, inds = get_dists_inds_ck(X, 2*k, boxsize)\n    dims = szepesvari_dimensionality(dists)\n    return dims, dists, inds\n\ndef ml_estimator(normed_dists):\n    return -1./ np.nanmean(np.log(normed_dists), axis=1)\n\ndef ml_dims(X, k2, k1=1):\n    \"\"\"Maximum likelihood estimator of intrinsic dimension (Levina-Bickel)\"\"\"\n    dists, inds = get_dists_inds_ck(X, k2+1, boxsize=None)\n    norm_dists = dists / dists[:, -1:]\n    dims = ml_estimator(norm_dists[:, k1:-1])\n    return dims, dists, inds\n\ndef szepes_ml(local_d):\n    \"\"\"maximum likelihood estimator from local FSA estimates (for k=1)\n\n    :param numpy.ndarray of float local_d: local FSA estimates\n    :return: global ML-FSA estimate\n    \"\"\"\n    return hmean(local_d) / np.log(2)"
] |
[
[
"numpy.log",
"numpy.arange",
"scipy.stats.hmean",
"numpy.ceil",
"scipy.spatial.cKDTree"
]
] |
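The FSA estimator in `src/cmfsapy/dimension/fsa.py` above is the ratio rule d = -log 2 / log(r_k / r_2k) applied to kNN distances from a cKDTree. A self-contained sketch on synthetic data evaluated at a single k; the data and the value of k are made up, and for points spread over a plane the estimate should come out near 2:

```python
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
X = np.zeros((2000, 3))
X[:, :2] = rng.random((2000, 2))      # a 2-D sheet embedded in 3-D

k = 8
dists, _ = cKDTree(X).query(X, 2 * k + 1)                     # column 0 is the point itself
d_local = -np.log(2) / np.log(dists[:, k] / dists[:, 2 * k])  # FSA estimate at this k
print(np.median(d_local))                                     # roughly 2
```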
sanderisbestok/ExtremeNet
|
[
"02c0126581bfdf95885ba55aa12f23de4cf2c12e"
] |
[
"nnet/py_factory.py"
] |
[
"import os\nimport torch\nimport importlib\nimport torch.nn as nn\nimport json\n\n\nfrom config import system_configs\nfrom models.py_utils.data_parallel import DataParallel\nfrom db.datasets import datasets\n\ntorch.manual_seed(317)\n\nclass Network(nn.Module):\n    def __init__(self, model, loss):\n        super(Network, self).__init__()\n\n        self.model = model\n        self.loss = loss\n\n    def forward(self, xs, ys, **kwargs):\n        preds = self.model(*xs, **kwargs)\n        loss = self.loss(preds, ys, **kwargs)\n        return loss\n\n# for model backward compatibility\n# previously model was wrapped by DataParallel module\nclass DummyModule(nn.Module):\n    def __init__(self, model):\n        super(DummyModule, self).__init__()\n        self.module = model\n\n    def forward(self, *xs, **kwargs):\n        return self.module(*xs, **kwargs)\n\n    def load_my_state_dict(self, state_dict, strict=False):\n        own_state = self.state_dict()\n        for name, param in state_dict.items():\n            if name not in own_state:\n                print(\"unknown name\")\n                continue\n            # if isinstance(param, Parameter):\n            #     print(\"instance\")\n            #     # backwards compatibility for serialized parameters\n            #     param = param.data\n            if (own_state[name].size() == param.size()):\n                print(\"copy\")\n                own_state[name].copy_(param)\n            else:\n                print(\"different size\")\n\nclass NetworkFactory(object):\n    def __init__(self, db):\n        super(NetworkFactory, self).__init__()\n\n        module_file = \"models.{}\".format(system_configs.snapshot_name)\n        print(\"module_file: {}\".format(module_file))\n        nnet_module = importlib.import_module(module_file)\n\n        self.model = DummyModule(nnet_module.model(db))\n        self.loss = nnet_module.loss\n        self.network = Network(self.model, self.loss)\n        self.network = DataParallel(self.network, chunk_sizes=system_configs.chunk_sizes)\n\n        total_params = 0\n        for params in self.model.parameters():\n            num_params = 1\n            for x in params.size():\n                num_params *= x\n            total_params += num_params\n        print(\"total parameters: {}\".format(total_params))\n\n        if system_configs.opt_algo == \"adam\":\n            self.optimizer = torch.optim.Adam(\n                filter(lambda p: p.requires_grad, self.model.parameters())\n            )\n        elif system_configs.opt_algo == \"sgd\":\n            self.optimizer = torch.optim.SGD(\n                filter(lambda p: p.requires_grad, self.model.parameters()),\n                lr=system_configs.learning_rate, \n                momentum=0.9, weight_decay=0.0001\n            )\n        else:\n            raise ValueError(\"unknown optimizer\")\n\n    def cuda(self):\n        self.model.cuda()\n\n    def train_mode(self):\n        self.network.train()\n\n    def eval_mode(self):\n        self.network.eval()\n\n    def train(self, xs, ys, **kwargs):\n        xs = [x.cuda(non_blocking=True) for x in xs]\n        ys = [y.cuda(non_blocking=True) for y in ys]\n\n        self.optimizer.zero_grad()\n        loss = self.network(xs, ys)\n        loss = loss.mean()\n        loss.backward()\n        self.optimizer.step()\n        return loss\n\n    def validate(self, xs, ys, **kwargs):\n        with torch.no_grad():\n            xs = [x.cuda(non_blocking=True) for x in xs]\n            ys = [y.cuda(non_blocking=True) for y in ys]\n\n            loss = self.network(xs, ys)\n            loss = loss.mean()\n            return loss\n\n    def test(self, xs, **kwargs):\n        with torch.no_grad():\n            xs = [x.cuda(non_blocking=True) for x in xs]\n            return self.model(*xs, **kwargs)\n\n    def set_lr(self, lr):\n        print(\"setting learning rate to: {}\".format(lr))\n        for param_group in self.optimizer.param_groups:\n            param_group[\"lr\"] = lr\n\n    def load_pretrained_params(self, pretrained_model):\n        print(\"loading pretrain from {}\".format(pretrained_model))\n        with open(pretrained_model, \"rb\") as f:\n            params = torch.load(f)\n        self.model.load_my_state_dict(params, strict=False)\n\n    def 
load_params(self, iteration):\n cache_file = system_configs.snapshot_file.format(iteration)\n print(\"loading model from {}\".format(cache_file))\n with open(cache_file, \"rb\") as f:\n params = torch.load(f)\n self.model.load_state_dict(params)\n\n def save_params(self, iteration):\n cache_file = system_configs.snapshot_file.format(iteration)\n print(\"saving model to {}\".format(cache_file))\n with open(cache_file, \"wb\") as f:\n params = self.model.state_dict()\n torch.save(params, f)\n\n def calculate_bboxes(self, cfg_file, iteration):\n print(\"JOEJOE\")\n with open(cfg_file, \"r\") as f:\n configs = json.load(f)\n\n train_split = system_configs.train_split\n val_split = system_configs.val_split\n test_split = system_configs.test_split\n\n split = {\n \"training\": train_split,\n \"validation\": val_split,\n \"testing\": test_split\n }[\"validation\"]\n\n dataset = system_configs.dataset\n\n testing_db = datasets[dataset](configs[\"db\"], split)\n\n test_file = \"test.{}\".format(testing_db.data)\n testing = importlib.import_module(test_file).testing\n\n result_dir = system_configs.result_dir\n result_dir = os.path.join(result_dir, str(iteration), split)\n\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n \n testing(testing_db, self, result_dir, iteration)"
] |
[
[
"torch.manual_seed",
"torch.save",
"torch.no_grad",
"torch.load"
]
] |
KevinXie86/AutowareArchitectureProposal.iv
|
[
"28c8c367652bcc5c2548abdab732e0fea7302653"
] |
[
"planning/scenario_planning/common/motion_velocity_optimizer/scripts/trajectory_visualizer.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright 2020 Tier IV, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\n\nfrom autoware_planning_msgs.msg import Path\nfrom autoware_planning_msgs.msg import PathWithLaneId\nfrom autoware_planning_msgs.msg import Trajectory\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import TwistStamped\nfrom matplotlib import animation\nimport matplotlib.pyplot as plt\nimport message_filters\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nimport tf2_ros\nfrom tf2_ros.buffer import Buffer\nfrom tf2_ros.transform_listener import TransformListener\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-l', '--length', help='max arclength in plot')\nparser.add_argument(\n '-t', '--type',\n help='Options VEL(default): show velocity only, VEL_ACC_JERK: show vel & acc & jerk'\n)\n\nargs = parser.parse_args()\n\nif args.length is None:\n PLOT_MAX_ARCLENGTH = 200\nelse:\n PLOT_MAX_ARCLENGTH = int(args.length)\nprint('max arclength = ' + str(PLOT_MAX_ARCLENGTH))\n\nif args.type is None:\n PLOT_TYPE = 'VEL'\nelif args.type == 'VEL':\n PLOT_TYPE = 'VEL'\nelif args.type == 'VEL_ACC_JERK':\n PLOT_TYPE = 'VEL_ACC_JERK'\nelse:\n print('invalid type. set default VEL.')\n PLOT_TYPE = 'VEL'\nprint('plot type = ' + PLOT_TYPE)\n\nPATH_ORIGIN_FRAME = 'map'\nSELF_POSE_FRAME = 'base_link'\n\n\nclass TrajectoryVisualizer(Node):\n\n def __init__(self):\n\n super().__init__('trajectory_visualizer')\n\n self.fig = plt.figure()\n\n self.max_vel = 0.0\n self.min_vel = 0.0\n self.min_accel = 0.0\n self.max_accel = 0.0\n self.min_jerk = 0.0\n self.max_jerk = 0.0\n\n # update flag\n self.update_ex_vel_lim = False\n self.update_lat_acc_fil = False\n self.update_traj_raw = False\n self.update_traj_resample = False\n self.update_traj_final = False\n self.update_lanechange_path = False\n self.update_behavior_path = False\n self.update_traj_ob_avoid = False\n self.update_traj_ob_stop = False\n\n self.tf_buffer = Buffer(node=self)\n self.tf_listener = TransformListener(\n self.tf_buffer, self, spin_thread=True)\n\n self.self_pose = Pose()\n self.self_pose_received = False\n self.localization_twist = Twist()\n self.vehicle_twist = Twist()\n\n self.trajectory_external_velocity_limited = Trajectory()\n self.trajectory_lateral_acc_filtered = Trajectory()\n self.trajectory_raw = Trajectory()\n self.trajectory_time_resampled = Trajectory()\n self.trajectory_final = Trajectory()\n\n self.lane_change_path = PathWithLaneId()\n self.behavior_path = Path()\n self.obstacle_avoid_traj = Trajectory()\n self.obstacle_stop_traj = Trajectory()\n\n self.plotted = [False] * 9\n self.sub_localization_twist = self.create_subscription(\n TwistStamped, '/localization/twist', self.CallbackLocalizationTwist, 1)\n self.sub_vehicle_twist = self.create_subscription(\n TwistStamped, '/vehicle/status/twist', self.CallbackVehicleTwist, 1)\n\n # BUFFER_SIZE = 65536*100\n optimizer_debug = 
'/planning/scenario_planning/motion_velocity_optimizer/debug/'\n self.sub1 = message_filters.Subscriber(\n self, Trajectory, optimizer_debug + 'trajectory_external_velocity_limited')\n self.sub2 = message_filters.Subscriber(\n self, Trajectory, optimizer_debug + 'trajectory_lateral_acc_filtered')\n self.sub3 = message_filters.Subscriber(\n self, Trajectory, optimizer_debug + 'trajectory_raw')\n self.sub4 = message_filters.Subscriber(\n self, Trajectory, optimizer_debug + 'trajectory_time_resampled')\n self.sub5 = message_filters.Subscriber(\n self, Trajectory, '/planning/scenario_planning/trajectory')\n\n lane_driving = '/planning/scenario_planning/lane_driving'\n self.sub6 = message_filters.Subscriber(\n self, PathWithLaneId, lane_driving + '/behavior_planning/path_with_lane_id')\n self.sub7 = message_filters.Subscriber(\n self, Path, lane_driving + '/behavior_planning/path')\n self.sub8 = message_filters.Subscriber(\n self,\n Trajectory,\n lane_driving +\n '/motion_planning/obstacle_avoidance_planner/trajectory')\n self.sub9 = message_filters.Subscriber(\n self, Trajectory, '/planning/scenario_planning/trajectory')\n\n self.ts1 = message_filters.ApproximateTimeSynchronizer(\n [self.sub1, self.sub2, self.sub3, self.sub4, self.sub5], 30, 0.5)\n self.ts1.registerCallback(self.CallbackMotionVelOptTraj)\n self.ts2 = message_filters.ApproximateTimeSynchronizer(\n [self.sub6, self.sub7, self.sub8, self.sub9], 30, 1, 0)\n self.ts2.registerCallback(self.CallBackLaneDrivingTraj)\n\n # main process\n if PLOT_TYPE == 'VEL_ACC_JERK':\n self.ani = animation.FuncAnimation(\n self.fig, self.plotTrajectory, interval=100, blit=True)\n self.setPlotTrajectory()\n else:\n self.ani = animation.FuncAnimation(\n self.fig, self.plotTrajectoryVelocity, interval=100, blit=True)\n self.setPlotTrajectoryVelocity()\n\n plt.show()\n\n return\n\n def test(self):\n self.updatePose('map', 'base_link')\n\n def CallbackLocalizationTwist(self, cmd):\n self.localization_twist = cmd.twist\n\n def CallbackVehicleTwist(self, cmd):\n self.vehicle_twist = cmd.twist\n\n def CallbackMotionVelOptTraj(self, cmd1, cmd2, cmd3, cmd4, cmd5):\n self.CallBackTrajExVelLim(cmd1)\n self.CallBackTrajLatAccFiltered(cmd2)\n self.CallBackTrajRaw(cmd3)\n self.CallBackTrajTimeResampled(cmd4)\n self.CallBackTrajFinal(cmd5)\n\n def CallBackTrajExVelLim(self, cmd):\n self.trajectory_external_velocity_limited = cmd\n self.update_ex_vel_lim = True\n\n def CallBackTrajLatAccFiltered(self, cmd):\n self.trajectory_lateral_acc_filtered = cmd\n self.update_lat_acc_fil = True\n\n def CallBackTrajRaw(self, cmd):\n self.trajectory_raw = cmd\n self.update_traj_raw = True\n\n def CallBackTrajTimeResampled(self, cmd):\n self.trajectory_time_resampled = cmd\n self.update_traj_resample = True\n\n def CallBackTrajFinal(self, cmd):\n self.trajectory_final = cmd\n self.update_traj_final = True\n\n def CallBackLaneDrivingTraj(self, cmd6, cmd7, cmd8, cmd9):\n self.CallBackLaneChangePath(cmd6)\n self.CallBackBehaviorPath(cmd7)\n self.CallbackObstacleAvoidTraj(cmd8)\n self.CallbackObstacleStopTraj(cmd9)\n\n def CallBackLaneChangePath(self, cmd):\n self.lane_change_path = cmd\n self.update_lanechange_path = True\n\n def CallBackBehaviorPath(self, cmd):\n self.behavior_path = cmd\n self.update_behavior_path = True\n\n def CallbackObstacleAvoidTraj(self, cmd):\n self.obstacle_avoid_traj = cmd\n self.update_traj_ob_avoid = True\n\n def CallbackObstacleStopTraj(self, cmd):\n self.obstacle_stop_traj = cmd\n self.update_traj_ob_stop = True\n\n def 
setPlotTrajectoryVelocity(self):\n self.ax1 = plt.subplot(1, 1, 1) # row, col, index(<raw*col)\n self.im1, = self.ax1.plot(\n [], [], label='0: lane_change_path', marker='')\n self.im2, = self.ax1.plot(\n [], [], label='1: behavior_path', marker='', ls='--')\n self.im3, = self.ax1.plot(\n [], [], label='2: obstacle_avoid_traj', marker='', ls='-.')\n self.im4, = self.ax1.plot(\n [], [], label='3: obstacle_stop_traj', marker='', ls='--')\n self.im5, = self.ax1.plot(\n [], [], label='4-1: opt input', marker='', ls='--')\n self.im6, = self.ax1.plot(\n [], [], label='4-2: opt external_velocity_limited', marker='', ls='--')\n self.im7, = self.ax1.plot(\n [], [], label='4-3: opt lat_acc_filtered', marker='', ls='--')\n self.im8, = self.ax1.plot(\n [], [], label='4-4: opt time_resampled', marker='', ls='--')\n self.im9, = self.ax1.plot(\n [], [], label='4-5: opt final', marker='', ls='-')\n self.im10, = self.ax1.plot(\n [], [], label='localization twist vx', color='r', marker='*', ls=':', markersize=10)\n self.im11, = self.ax1.plot(\n [], [], label='vehicle twist vx', color='k', marker='+', ls=':', markersize=10)\n self.ax1.set_title(\"trajectory's velocity\")\n self.ax1.legend()\n self.ax1.set_xlim([0, PLOT_MAX_ARCLENGTH])\n self.ax1.set_ylabel('vel [m/s]')\n\n return self.im1, self.im2, self.im3, self.im4, self.im5, \\\n self.im6, self.im7, self.im8, self.im9, self.im10, self.im11\n\n def plotTrajectoryVelocity(self, data):\n self.updatePose(PATH_ORIGIN_FRAME, SELF_POSE_FRAME)\n if self.self_pose_received is False:\n print('plot start but self pose is not received')\n return self.im1, self.im2, self.im3, self.im4, self.im5, \\\n self.im6, self.im7, self.im8, self.im9, self.im10, self.im11\n print('plot start')\n\n # copy\n lane_change_path = self.lane_change_path\n behavior_path = self.behavior_path\n obstacle_avoid_traj = self.obstacle_avoid_traj\n obstacle_stop_traj = self.obstacle_stop_traj\n trajectory_raw = self.trajectory_raw\n trajectory_external_velocity_limited = self.trajectory_external_velocity_limited\n trajectory_lateral_acc_filtered = self.trajectory_lateral_acc_filtered\n trajectory_time_resampled = self.trajectory_time_resampled\n trajectory_final = self.trajectory_final\n\n if self.update_lanechange_path:\n x = self.CalcArcLengthPathWLid(lane_change_path)\n y = self.ToVelListPathWLid(lane_change_path)\n self.im1.set_data(x, y)\n self.update_lanechange_path = False\n if len(y) != 0:\n self.max_vel = max(10.0, np.max(y))\n self.min_vel = np.min(y)\n\n if self.update_behavior_path:\n x = self.CalcArcLengthPath(behavior_path)\n y = self.ToVelListPath(behavior_path)\n self.im2.set_data(x, y)\n self.update_behavior_path = False\n\n if self.update_traj_ob_avoid:\n x = self.CalcArcLength(obstacle_avoid_traj)\n y = self.ToVelList(obstacle_avoid_traj)\n self.im3.set_data(x, y)\n self.update_traj_ob_avoid = False\n\n if self.update_traj_ob_stop:\n x = self.CalcArcLength(obstacle_stop_traj)\n y = self.ToVelList(obstacle_stop_traj)\n self.im4.set_data(x, y)\n self.update_traj_ob_stop = False\n\n if self.update_traj_raw:\n x = self.CalcArcLength(trajectory_raw)\n y = self.ToVelList(trajectory_raw)\n self.im5.set_data(x, y)\n self.update_traj_raw = False\n\n if self.update_ex_vel_lim:\n x = self.CalcArcLength(trajectory_external_velocity_limited)\n y = self.ToVelList(trajectory_external_velocity_limited)\n self.im6.set_data(x, y)\n self.update_ex_vel_lim = False\n\n if self.update_lat_acc_fil:\n x = self.CalcArcLength(trajectory_lateral_acc_filtered)\n y = 
self.ToVelList(trajectory_lateral_acc_filtered)\n self.im7.set_data(x, y)\n self.update_lat_acc_fil = False\n\n if self.update_traj_resample:\n x = self.CalcArcLength(trajectory_time_resampled)\n y = self.ToVelList(trajectory_time_resampled)\n self.im8.set_data(x, y)\n self.update_traj_resample = False\n\n if self.update_traj_final:\n x = self.CalcArcLength(trajectory_final)\n y = self.ToVelList(trajectory_final)\n self.im9.set_data(x, y)\n self.update_traj_final = False\n\n closest = self.calcClosestTrajectory(trajectory_final)\n if closest >= 0:\n x_closest = x[closest]\n self.im10.set_data(x_closest, self.localization_twist.linear.x)\n self.im11.set_data(x_closest, self.vehicle_twist.linear.x)\n\n # change y-range\n self.ax1.set_ylim([self.min_vel - 1.0, self.max_vel + 1.0])\n\n return self.im1, self.im2, self.im3, self.im4, self.im5, \\\n self.im6, self.im7, self.im8, self.im9, self.im10, self.im11\n\n def CalcArcLength(self, traj):\n s_arr = []\n ds = 0.0\n s_sum = 0.0\n\n if len(traj.points) > 0:\n s_arr.append(s_sum)\n\n for i in range(1, len(traj.points)):\n p0 = traj.points[i - 1]\n p1 = traj.points[i]\n dx = p1.pose.position.x - p0.pose.position.x\n dy = p1.pose.position.y - p0.pose.position.y\n ds = np.sqrt(dx**2 + dy**2)\n s_sum += ds\n s_arr.append(s_sum)\n return s_arr\n\n def CalcArcLengthPathWLid(self, traj):\n s_arr = []\n ds = 0.0\n s_sum = 0.0\n\n if len(traj.points) > 0:\n s_arr.append(s_sum)\n\n for i in range(1, len(traj.points)):\n p0 = traj.points[i - 1].point\n p1 = traj.points[i].point\n dx = p1.pose.position.x - p0.pose.position.x\n dy = p1.pose.position.y - p0.pose.position.y\n ds = np.sqrt(dx**2 + dy**2)\n s_sum += ds\n s_arr.append(s_sum)\n return s_arr\n\n def CalcArcLengthPath(self, traj):\n s_arr = []\n ds = 0.0\n s_sum = 0.0\n\n if len(traj.points) > 0:\n s_arr.append(s_sum)\n\n for i in range(1, len(traj.points)):\n p0 = traj.points[i - 1]\n p1 = traj.points[i]\n dx = p1.pose.position.x - p0.pose.position.x\n dy = p1.pose.position.y - p0.pose.position.y\n ds = np.sqrt(dx**2 + dy**2)\n s_sum += ds\n s_arr.append(s_sum)\n return s_arr\n\n def ToVelList(self, traj):\n v_list = []\n for p in traj.points:\n v_list.append(p.twist.linear.x)\n return v_list\n\n def ToVelListPathWLid(self, traj):\n v_list = []\n for p in traj.points:\n v_list.append(p.point.twist.linear.x)\n return v_list\n\n def ToVelListPath(self, traj):\n v_list = []\n for p in traj.points:\n v_list.append(p.twist.linear.x)\n return v_list\n\n def CalcAcceleration(self, traj):\n a_arr = []\n for i in range(1, len(traj.points) - 1):\n p0 = traj.points[i - 1]\n p1 = traj.points[i]\n v0 = p0.twist.linear.x\n v1 = p1.twist.linear.x\n v = 0.5 * (v1 + v0)\n dx = p1.pose.position.x - p0.pose.position.x\n dy = p1.pose.position.y - p0.pose.position.y\n ds = np.sqrt(dx**2 + dy**2)\n dt = ds / max(abs(v), 0.001)\n a = (v1 - v0) / dt\n a_arr.append(a)\n if len(traj.points) > 0:\n a_arr.append(0)\n a_arr.append(0)\n return a_arr\n\n def CalcJerk(self, traj):\n j_arr = []\n for i in range(1, len(traj.points) - 2):\n p0 = traj.points[i - 1]\n p1 = traj.points[i]\n p2 = traj.points[i + 1]\n v0 = p0.twist.linear.x\n v1 = p1.twist.linear.x\n v2 = p2.twist.linear.x\n\n dx0 = p1.pose.position.x - p0.pose.position.x\n dy0 = p1.pose.position.y - p0.pose.position.y\n ds0 = np.sqrt(dx0**2 + dy0**2)\n\n dx1 = p2.pose.position.x - p1.pose.position.x\n dy1 = p2.pose.position.y - p1.pose.position.y\n ds1 = np.sqrt(dx1**2 + dy1**2)\n\n dt0 = ds0 / max(abs(0.5 * (v1 + v0)), 0.001)\n dt1 = ds1 / max(abs(0.5 * (v2 + v1)), 
0.001)\n\n a0 = (v1 - v0) / max(dt0, 0.001)\n a1 = (v2 - v1) / max(dt1, 0.001)\n j = (a1 - a0) / max(dt1, 0.001)\n j_arr.append(j)\n if len(traj.points) > 0:\n j_arr.append(0)\n j_arr.append(0)\n j_arr.append(0)\n return j_arr\n\n def setPlotTrajectory(self):\n self.ax1 = plt.subplot(3, 1, 1) # row, col, index(<raw*col)\n self.im0, = self.ax1.plot([], [], label='0: raw', marker='')\n self.im1, = self.ax1.plot([], [], label='3: time_resampled', marker='')\n self.im2, = self.ax1.plot([], [], label='4: final velocity', marker='')\n self.ax1.set_title(\"trajectory's velocity\")\n self.ax1.legend()\n self.ax1.set_xlim([0, PLOT_MAX_ARCLENGTH])\n self.ax1.set_ylabel('vel [m/s]')\n\n self.ax2 = plt.subplot(3, 1, 2)\n self.ax2.set_xlim([0, PLOT_MAX_ARCLENGTH])\n self.ax2.set_ylim([-1, 1])\n self.ax2.set_ylabel('acc [m/ss]')\n self.im3, = self.ax2.plot([], [], label='final accel')\n\n self.ax3 = plt.subplot(3, 1, 3)\n self.ax3.set_xlim([0, PLOT_MAX_ARCLENGTH])\n self.ax3.set_ylim([-2, 2])\n self.ax3.set_xlabel('arclength [m]')\n self.ax3.set_ylabel('jerk [m/sss]')\n self.im4, = self.ax3.plot([], [], label='final jerk')\n\n return self.im0, self.im1, self.im2, self.im3, self.im4\n\n def plotTrajectory(self, data):\n print('plot called')\n self.updatePose(PATH_ORIGIN_FRAME, SELF_POSE_FRAME)\n\n # copy\n trajectory_raw = self.trajectory_raw\n # trajectory_external_velocity_limited = self.trajectory_external_velocity_limited\n # trajectory_lateral_acc_filtered = self.trajectory_lateral_acc_filtered\n trajectory_time_resampled = self.trajectory_time_resampled\n trajectory_final = self.trajectory_final\n\n # ax1\n if self.update_traj_raw:\n x = self.CalcArcLength(trajectory_raw)\n y = self.ToVelList(trajectory_raw)\n self.im0.set_data(x, y)\n self.update_traj_raw = False\n if len(y) != 0:\n self.max_vel = max(10.0, np.max(y))\n self.min_vel = np.min(y)\n # change y-range\n self.ax1.set_ylim([self.min_vel - 1.0, self.max_vel + 1.0])\n\n if self.update_traj_resample:\n x = self.CalcArcLength(trajectory_time_resampled)\n y = self.ToVelList(trajectory_time_resampled)\n self.im1.set_data(x, y)\n self.update_traj_resample = False\n\n if self.update_traj_final:\n x = self.CalcArcLength(trajectory_final)\n y = self.ToVelList(trajectory_final)\n self.im2.set_data(x, y)\n self.update_traj_final = False\n\n # ax2\n y = self.CalcAcceleration(trajectory_final)\n if len(y) != 0:\n self.max_accel = max(0.0, np.max(y))\n self.min_accel = min(0.0, np.min(y))\n # change y-range\n self.ax2.set_ylim([self.min_accel - 1.0, self.max_accel + 1.0])\n if len(x) == len(y):\n self.im3.set_data(x, y)\n\n # ax3\n y = self.CalcJerk(trajectory_final)\n if len(y) != 0:\n self.max_jerk = max(0.0, np.max(y))\n self.min_jerk = min(0.0, np.min(y))\n # change y-range\n # self.ax3.set_ylim([self.min_jerk - 1.0, self.max_jerk + 1.0])\n self.ax3.set_ylim([-2.0, 2.0]) # fixed range\n if len(x) == len(y):\n self.im4.set_data(x, y)\n\n return self.im0, self.im1, self.im2, self.im3, self.im4\n\n def calcClosestPath(self, path):\n closest = -1\n min_dist_squared = 1.0e10\n for i in range(0, len(path.points)):\n dist_sq = self.calcSquaredDist2d(\n self.self_pose, path.points[i].pose)\n if dist_sq < min_dist_squared:\n min_dist_squared = dist_sq\n closest = i\n return closest\n\n def calcClosestPathWLid(self, path):\n closest = -1\n min_dist_squared = 1.0e10\n for i in range(0, len(path.points)):\n dist_sq = self.calcSquaredDist2d(\n self.self_pose, path.points[i].point.pose)\n if dist_sq < min_dist_squared:\n min_dist_squared = dist_sq\n closest = 
i\n return closest\n\n def calcClosestTrajectory(self, path):\n closest = -1\n min_dist_squared = 1.0e10\n for i in range(0, len(path.points)):\n dist_sq = self.calcSquaredDist2d(\n self.self_pose, path.points[i].pose)\n if dist_sq < min_dist_squared:\n min_dist_squared = dist_sq\n closest = i\n return closest\n\n def calcSquaredDist2d(self, p1, p2):\n dx = p1.position.x - p2.position.x\n dy = p1.position.y - p2.position.y\n return dx * dx + dy * dy\n\n def updatePose(self, from_link, to_link):\n try:\n tf = self.tf_buffer.lookup_transform(\n from_link, to_link, rclpy.time.Time())\n self.self_pose.position.x = tf.transform.translation.x\n self.self_pose.position.y = tf.transform.translation.y\n self.self_pose.position.z = tf.transform.translation.z\n self.self_pose.orientation.x = tf.transform.rotation.x\n self.self_pose.orientation.y = tf.transform.rotation.y\n self.self_pose.orientation.z = tf.transform.rotation.z\n self.self_pose.orientation.w = tf.transform.rotation.w\n print('updatePose succeeded')\n self.self_pose_received = True\n return\n except tf2_ros.TransformException:\n self.get_logger().warn(\n 'lookup transform failed: from {} to {}'.format(\n from_link, to_link))\n return\n\n def closeFigure(self):\n plt.close(self.fig)\n\n\ndef main(args=None):\n try:\n rclpy.init(args=args)\n node = TrajectoryVisualizer()\n rclpy.spin(node)\n except KeyboardInterrupt:\n pass\n finally:\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.sqrt",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
lopez-lab/PyRAI2MD
|
[
"43e27fbc9bc5b6ab6a8f170791951f316fcd0964"
] |
[
"PyRAI2MD/Machine_Learning/pyNNsMD/nn_pes_src/training/training_mlp_g2.py"
] |
[
"\"\"\"\nThe main training script for energy gradient model. Called with ArgumentParse.\n\"\"\"\n# from sklearn.utils import shuffle\n# import time\nimport matplotlib as mpl\nimport numpy as np\nimport tensorflow as tf\n\nmpl.use('Agg')\nimport os\nimport json\nimport pickle\nimport sys\nsys.path.append('%s/PyRAI2MD/Machine_Learning' % os.environ['PYRAI2MD'])\nimport argparse\n\nparser = argparse.ArgumentParser(description='Train a energy-gradient model from data, parameters given in a folder')\n\nparser.add_argument(\"-i\", \"--index\", required=True, help=\"Index of the NN to train\")\nparser.add_argument(\"-f\", \"--filepath\", required=True, help=\"Filepath to weights, hyperparameter, data etc. \")\nparser.add_argument(\"-g\", \"--gpus\", default=-1, required=True, help=\"Index of gpu to use\")\nparser.add_argument(\"-m\", \"--mode\", default=\"training\", required=True, help=\"Which mode to use train or retrain\")\nargs = vars(parser.parse_args())\n# args = {\"filepath\":\"E:/Benutzer/Patrick/PostDoc/Projects ML/NeuralNet4/NNfit0/energy_gradient_0\",'index' : 0,\"gpus\":0}\n\n\nfstdout = open(os.path.join(args['filepath'], \"fitlog_\" + str(args['index']) + \".txt\"), 'w')\nsys.stderr = fstdout\nsys.stdout = fstdout\n\nprint(\"Input argpars:\", args)\n\nfrom pyNNsMD.nn_pes_src.device import set_gpu\n\nset_gpu([int(args['gpus'])])\nprint(\"Logic Devices:\", tf.config.experimental.list_logical_devices('GPU'))\n\nfrom pyNNsMD.utils.callbacks import EarlyStopping, lr_lin_reduction, lr_exp_reduction, lr_step_reduction\nfrom pyNNsMD.models.mlp_g2 import GradientModel2\n# from pyNNsMD.nn_pes_src.legacy import compute_feature_derivative\nfrom pyNNsMD.datasets.general import load_hyp\nfrom pyNNsMD.datasets.general import split_validation_training_index\n# from pyNNsMD.nn_pes_src.scaler import save_std_scaler_dict\nfrom pyNNsMD.scaler.energy import GradientStandardScaler\nfrom pyNNsMD.scaler.general import SegmentStandardScaler\nfrom pyNNsMD.utils.loss import get_lr_metric, ScaledMeanAbsoluteError, r2_metric\nfrom pyNNsMD.plots.loss import plot_loss_curves, plot_learning_curve\nfrom pyNNsMD.plots.pred import plot_scatter_prediction\nfrom pyNNsMD.plots.error import plot_error_vec_mean, plot_error_vec_max\n\n\ndef train_model_energy_gradient(i=0, outdir=None, mode='training'):\n \"\"\"\n Train an energy plus gradient model. Uses precomputed feature and model representation.\n\n Args:\n i (int, optional): Model index. The default is 0.\n outdir (str, optional): Direcotry for fit output. The default is None.\n mode (str, optional): Fitmode to take from hyperparameters. 
The default is 'training'.\n\n Raises:\n ValueError: Wrong input shape.\n\n Returns:\n error_val (list): Validation error for (energy,gradient).\n\n \"\"\"\n i = int(i)\n # Load everything from folder\n try:\n with open(os.path.join(outdir, 'data_y'), 'rb') as f:\n y = pickle.load(f)\n with open(os.path.join(outdir, 'data_x'), 'rb') as f:\n x = pickle.load(f)\n except:\n print(\"Error: Can not load data for fit\", outdir)\n return\n hyperall = None\n try:\n hyperall = load_hyp(os.path.join(outdir, 'hyper' + '_v%i' % i + \".json\"))\n except:\n print(\"Error: Can not load hyper for fit\", outdir)\n\n scaler = GradientStandardScaler()\n try:\n scaler.load(os.path.join(outdir, 'scaler' + '_v%i' % i + \".json\"))\n except:\n print(\"Error: Can not load scaler info for fit\", outdir)\n\n # Model\n hypermodel = hyperall['model']\n # plots\n unit_label_grad = hyperall['plots']['unit_gradient']\n # Fit\n hyper = hyperall[mode]\n epo = hyper['epo']\n batch_size = hyper['batch_size']\n epostep = hyper['epostep']\n val_disjoint = hyper['val_disjoint']\n val_split = hyper['val_split']\n initialize_weights = hyper['initialize_weights']\n learning_rate = hyper['learning_rate']\n loss_weights = hyper['loss_weights']\n auto_scale = hyper['auto_scaling']\n normalize_feat = int(hyper['normalization_mode'])\n # step\n use_step_callback = hyper['step_callback']\n use_linear_callback = hyper['linear_callback']\n use_exp_callback = hyper['exp_callback']\n use_early_callback = hyper['early_callback']\n\n # Data Check here:\n if (len(x.shape) != 3):\n raise ValueError(\"Input x-shape must be (batch,atoms,3)\")\n else:\n print(\"Found x-shape of\", x.shape)\n if (len(y.shape) != 4):\n raise ValueError(\"Input gradient-shape must be (batch,states,atoms,3)\")\n else:\n print(\"Found gradient-shape of\", y.shape)\n\n # Fit stats dir\n dir_save = os.path.join(outdir, \"fit_stats\")\n os.makedirs(dir_save, exist_ok=True)\n\n # cbks,Learning rate schedule\n cbks = []\n if use_early_callback['use']:\n es_cbk = EarlyStopping(**use_early_callback)\n cbks.append(es_cbk)\n if use_linear_callback['use']:\n lr_sched = lr_lin_reduction(**use_linear_callback)\n lr_cbk = tf.keras.callbacks.LearningRateScheduler(lr_sched)\n cbks.append(lr_cbk)\n if use_exp_callback['use']:\n lr_exp = lr_exp_reduction(**use_exp_callback)\n exp_cbk = tf.keras.callbacks.LearningRateScheduler(lr_exp)\n cbks.append(exp_cbk)\n if use_step_callback['use']:\n lr_step = lr_step_reduction(**use_step_callback)\n step_cbk = tf.keras.callbacks.LearningRateScheduler(lr_step)\n cbks.append(step_cbk)\n\n # Index train test split\n lval = int(len(x) * val_split)\n allind = np.arange(0, len(x))\n i_train, i_val = split_validation_training_index(allind, lval, val_disjoint, i)\n print(\"Info: Train-Test split at Train:\", len(i_train), \"Test\", len(i_val), \"Total\", len(x))\n\n # Make all Model\n out_model = GradientModel2(**hypermodel)\n out_model.precomputed_features = True\n\n # Look for loading weights\n npeps = np.finfo(float).eps\n if not initialize_weights:\n try:\n out_model.load_weights(os.path.join(outdir, \"weights\" + '_v%i' % i + '.h5'))\n print(\"Info: Load old weights at:\", os.path.join(outdir, \"weights\" + '_v%i' % i + '.h5'))\n print(\"Info: Transferring weights...\")\n except:\n print(\"Error: Can't load old weights...\")\n else:\n print(\"Info: Making new initialized weights.\")\n\n # Scale x,y\n scaler.fit(x, y, auto_scale=auto_scale)\n x_rescale, y_rescale = scaler.transform(x, y)\n y1 = y_rescale\n\n # Model + Model precompute layer +feat\n 
feat_x, feat_grad = out_model.precompute_feature_in_chunks(x_rescale, batch_size=batch_size)\n # Finding Normalization\n feat_x_mean, feat_x_std = out_model.set_const_normalization_from_features(feat_x,normalization_mode=normalize_feat)\n\n # Train Test split\n xtrain = [feat_x[i_train], feat_grad[i_train]]\n ytrain = y1[i_train]\n xval = [feat_x[i_val], feat_grad[i_val]]\n yval = y1[i_val]\n\n # Setting constant feature normalization\n optimizer = tf.keras.optimizers.Adam(lr=learning_rate)\n lr_metric = get_lr_metric(optimizer)\n mae_force = ScaledMeanAbsoluteError(scaling_shape=scaler.gradient_std.shape)\n mae_force.set_scale(scaler.gradient_std)\n out_model.compile(optimizer=optimizer,\n loss='mean_squared_error',\n metrics=[mae_force, lr_metric, r2_metric])\n\n scaler.print_params_info()\n print(\"Info: Using feature-scale\", feat_x_std.shape, \":\", feat_x_std)\n print(\"Info: Using feature-offset\", feat_x_mean.shape, \":\", feat_x_mean)\n\n print(\"\")\n print(\"Start fit.\")\n out_model.summary()\n hist = out_model.fit(x=xtrain, y=ytrain, epochs=epo, batch_size=batch_size,\n callbacks=cbks, validation_freq=epostep,\n validation_data=(xval,yval), verbose=2)\n print(\"End fit.\")\n print(\"\")\n\n try:\n outname = os.path.join(dir_save, \"history_\" + \".json\")\n outhist = {a: np.array(b, dtype=np.float64).tolist() for a, b in hist.history.items()}\n with open(outname, 'w') as f:\n json.dump(outhist, f)\n except:\n print(\"Warning: Cant save history\")\n\n try:\n out_model.save_weights(os.path.join(outdir, \"weights\" + '_v%i' % i + '.h5'))\n # print(out_model.get_weights())\n except:\n print(\"Warning: Cant save weights\")\n\n try:\n print(\"Info: Saving auto-scaler to file...\")\n scaler.save(os.path.join(outdir, \"scaler\" + '_v%i' % i + '.json'))\n except:\n print(\"Error: Can not export scaler info. 
Model prediciton will be wrongly scaled.\")\n\n try:\n # Plot and Save\n yval_plot = y[i_val]\n ytrain_plot = y[i_train]\n # Convert back scaler\n pval = out_model.predict(xval)\n ptrain = out_model.predict(xtrain)\n _, pval = scaler.inverse_transform(y=pval)\n _, ptrain = scaler.inverse_transform(y=ptrain)\n\n print(\"Info: Predicted Gradient shape:\", ptrain.shape)\n print(\"Info: Plot fit stats...\")\n\n # Plot\n plot_loss_curves(hist.history['mean_absolute_error'],\n hist.history['val_mean_absolute_error'],\n label_curves=[\"force\"],\n val_step=epostep, save_plot_to_file=True, dir_save=dir_save,\n filename='fit' + str(i), filetypeout='.png', unit_loss=unit_label_grad, loss_name=\"MAE\",\n plot_title=\"Force\")\n\n plot_learning_curve(hist.history['lr'], filename='fit' + str(i), dir_save=dir_save)\n\n\n plot_scatter_prediction(pval, yval_plot, save_plot_to_file=True, dir_save=dir_save,\n filename='fit' + str(i) + \"_grad\",\n filetypeout='.png', unit_actual=unit_label_grad, unit_predicted=unit_label_grad,\n plot_title=\"Prediction Gradient\")\n\n plot_error_vec_mean([pval, ptrain], [yval_plot, ytrain_plot],\n label_curves=[\"Validation gradients\", \"Training Gradients\"], unit_predicted=unit_label_grad,\n filename='fit' + str(i) + \"_grad\", dir_save=dir_save, save_plot_to_file=True,\n filetypeout='.png', x_label='Gradients xyz * #atoms * #states ',\n plot_title=\"Gradient mean error\")\n\n plot_error_vec_max([pval, ptrain], [yval_plot, ytrain_plot],\n label_curves=[\"Validation\", \"Training\"],\n unit_predicted=unit_label_grad, filename='fit' + str(i) + \"_grad\",\n dir_save=dir_save, save_plot_to_file=True, filetypeout='.png',\n x_label='Gradients xyz * #atoms * #states ', plot_title=\"Gradient max error\")\n\n except:\n print(\"Error: Could not plot fitting stats\")\n\n error_val = None\n try:\n # Safe fitting Error MAE\n pval = out_model.predict(xval)\n ptrain = out_model.predict(xtrain)\n _, pval = scaler.inverse_transform(y=pval)\n _, ptrain = scaler.inverse_transform(y=ptrain)\n out_model.precomputed_features = False\n out_model.output_as_dict = False\n ptrain2 = out_model.predict(x_rescale[i_train])\n _, ptrain2 = scaler.inverse_transform(y=ptrain2)\n print(\"Info: Max error precomputed and full gradient computation:\")\n print(\"Gradient\", np.max(np.abs(ptrain - ptrain2)))\n error_val = np.mean(np.abs(pval - y[i_val]))\n error_train = np.mean(np.abs(ptrain- y[i_train]))\n np.save(os.path.join(outdir, \"fiterr_valid\" + '_v%i' % i + \".npy\"), error_val)\n np.save(os.path.join(outdir, \"fiterr_train\" + '_v%i' % i + \".npy\"), error_train)\n print(\"error_val:\", error_val)\n print(\"error_train:\", error_train)\n except:\n print(\"Error: Can not save fiterror\")\n\n return error_val\n\n\nif __name__ == \"__main__\":\n print(\"Training Model: \", args['filepath'])\n print(\"Network instance: \", args['index'])\n out = train_model_energy_gradient(args['index'], args['filepath'], args['mode'])\n\nfstdout.close()\n"
] |
[
[
"tensorflow.config.experimental.list_logical_devices",
"numpy.abs",
"matplotlib.use",
"tensorflow.keras.callbacks.LearningRateScheduler",
"numpy.finfo",
"tensorflow.keras.optimizers.Adam",
"numpy.array"
]
] |
ljh4697/reinforcement-learning-kr-v2
|
[
"f7c4ae038796db252b172136f0ca895dc6183cb8"
] |
[
"1-grid-world/2-value-iteration/environment.py"
] |
[
"import tkinter as tk\nimport time\nimport numpy as np\nimport random\nfrom PIL import ImageTk, Image\n\nPhotoImage = ImageTk.PhotoImage\nUNIT = 100 # 픽셀 수\nHEIGHT = 5 # 그리드월드 세로\nWIDTH = 5 # 그리드월드 가로\nTRANSITION_PROB = 1\nPOSSIBLE_ACTIONS = [0, 1, 2, 3] # 상, 하, 좌, 우\nACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 좌표로 나타낸 행동\nREWARDS = []\n\n\nclass GraphicDisplay(tk.Tk):\n def __init__(self, value_iteration):\n super(GraphicDisplay, self).__init__()\n self.title('Value Iteration')\n self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))\n self.texts = []\n self.arrows = []\n self.env = Env()\n self.agent = value_iteration\n self.iteration_count = 0\n self.improvement_count = 0\n self.is_moving = 0\n (self.up, self.down, self.left,\n self.right), self.shapes = self.load_images()\n self.canvas = self._build_canvas()\n self.text_reward(2, 2, \"R : 1.0\")\n self.text_reward(1, 2, \"R : -1.0\")\n self.text_reward(2, 1, \"R : -1.0\")\n\n def _build_canvas(self):\n canvas = tk.Canvas(self, bg='white',\n height=HEIGHT * UNIT,\n width=WIDTH * UNIT)\n # 버튼 초기화\n iteration_button = tk.Button(self, text=\"Calculate\",\n command=self.calculate_value)\n iteration_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.13, (HEIGHT * UNIT) + 10,\n window=iteration_button)\n\n policy_button = tk.Button(self, text=\"Print Policy\",\n command=self.print_optimal_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.37, (HEIGHT * UNIT) + 10,\n window=policy_button)\n\n policy_button = tk.Button(self, text=\"Move\",\n command=self.move_by_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.62, (HEIGHT * UNIT) + 10,\n window=policy_button)\n\n policy_button = tk.Button(self, text=\"Clear\", command=self.clear)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.87, (HEIGHT * UNIT) + 10,\n window=policy_button)\n\n # 그리드 생성\n for col in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT\n canvas.create_line(x0, y0, x1, y1)\n for row in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row\n canvas.create_line(x0, y0, x1, y1)\n\n # 캔버스에 이미지 추가\n self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])\n canvas.create_image(250, 150, image=self.shapes[1])\n canvas.create_image(150, 250, image=self.shapes[1])\n canvas.create_image(250, 250, image=self.shapes[2])\n\n canvas.pack()\n\n return canvas\n\n def load_images(self):\n PhotoImage = ImageTk.PhotoImage\n up = PhotoImage(Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/up.png\").resize((13, 13)))\n right = PhotoImage(Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/right.png\").resize((13, 13)))\n left = PhotoImage(Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/left.png\").resize((13, 13)))\n down = PhotoImage(Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/down.png\").resize((13, 13)))\n rectangle = PhotoImage(\n Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/rectangle.png\").resize((65, 65)))\n triangle = PhotoImage(\n 
Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/triangle.png\").resize((65, 65)))\n circle = PhotoImage(Image.open(\"C:/Users/ljhlj/OneDrive/문서/GitHub/reinforcement-learning-kr-v2/1-grid-world/img/circle.png\").resize((65, 65)))\n return (up, down, left, right), (rectangle, triangle, circle)\n\n def clear(self):\n\n if self.is_moving == 0:\n self.iteration_count = 0\n self.improvement_count = 0\n for i in self.texts:\n self.canvas.delete(i)\n\n for i in self.arrows:\n self.canvas.delete(i)\n\n self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]\n\n x, y = self.canvas.coords(self.rectangle)\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)\n\n def reset(self):\n self.update()\n time.sleep(0.5)\n self.canvas.delete(self.rectangle)\n return self.canvas.coords(self.rectangle)\n\n def text_value(self, row, col, contents, font='Helvetica', size=12,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 85, 70\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def text_reward(self, row, col, contents, font='Helvetica', size=12,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 5, 5\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def rectangle_move(self, action):\n base_action = np.array([0, 0])\n location = self.find_rectangle()\n self.render()\n if action == 0 and location[0] > 0: # up\n base_action[1] -= UNIT\n elif action == 1 and location[0] < HEIGHT - 1: # down\n base_action[1] += UNIT\n elif action == 2 and location[1] > 0: # left\n base_action[0] -= UNIT\n elif action == 3 and location[1] < WIDTH - 1: # right\n base_action[0] += UNIT\n\n self.canvas.move(self.rectangle, base_action[0],\n base_action[1]) # move agent\n\n def find_rectangle(self):\n temp = self.canvas.coords(self.rectangle)\n x = (temp[0] / 100) - 0.5\n y = (temp[1] / 100) - 0.5\n return int(y), int(x)\n\n def move_by_policy(self):\n\n if self.improvement_count != 0 and self.is_moving != 1:\n self.is_moving = 1\n x, y = self.canvas.coords(self.rectangle)\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)\n\n x, y = self.find_rectangle()\n while len(self.agent.get_action([x, y])) != 0:\n action = random.sample(self.agent.get_action([x, y]), 1)[0]\n self.after(100, self.rectangle_move(action))\n x, y = self.find_rectangle()\n self.is_moving = 0\n\n def draw_one_arrow(self, col, row, action):\n if col == 2 and row == 2:\n return\n if action == 0: # up\n origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.up))\n elif action == 1: # down\n origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.down))\n elif action == 3: # right\n origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.right))\n elif action == 2: # left\n origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.left))\n\n def draw_from_values(self, state, action_list):\n i = state[0]\n j = state[1]\n for 
action in action_list:\n self.draw_one_arrow(i, j, action)\n\n def print_values(self, values):\n for i in range(WIDTH):\n for j in range(HEIGHT):\n self.text_value(i, j, round(values[i][j], 2))\n\n def render(self):\n time.sleep(0.1)\n self.canvas.tag_raise(self.rectangle)\n self.update()\n\n def calculate_value(self):\n self.iteration_count += 1\n for i in self.texts:\n self.canvas.delete(i)\n self.agent.value_iteration()\n self.print_values(self.agent.value_table)\n\n def print_optimal_policy(self):\n self.improvement_count += 1\n for i in self.arrows:\n self.canvas.delete(i)\n for state in self.env.get_all_states():\n action = self.agent.get_action(state)\n self.draw_from_values(state, action)\n\n\nclass Env:\n def __init__(self):\n self.transition_probability = TRANSITION_PROB\n self.width = WIDTH # Width of Grid World\n self.height = HEIGHT # Height of GridWorld\n self.reward = [[0] * WIDTH for _ in range(HEIGHT)]\n self.possible_actions = POSSIBLE_ACTIONS\n self.reward[2][2] = 1 # reward 1 for circle\n self.reward[1][2] = -1 # reward -1 for triangle\n self.reward[2][1] = -1 # reward -1 for triangle\n self.all_state = []\n\n for x in range(WIDTH):\n for y in range(HEIGHT):\n state = [x, y]\n self.all_state.append(state)\n\n def get_reward(self, state, action):\n next_state = self.state_after_action(state, action)\n return self.reward[next_state[0]][next_state[1]]\n\n def state_after_action(self, state, action_index):\n action = ACTIONS[action_index]\n return self.check_boundary([state[0] + action[0], state[1] + action[1]])\n\n @staticmethod\n def check_boundary(state):\n state[0] = (0 if state[0] < 0 else WIDTH - 1\n if state[0] > WIDTH - 1 else state[0])\n state[1] = (0 if state[1] < 0 else HEIGHT - 1\n if state[1] > HEIGHT - 1 else state[1])\n return state\n\n def get_transition_prob(self, state, action):\n return self.transition_probability\n\n def get_all_states(self):\n return self.all_state\n"
] |
[
[
"numpy.array"
]
] |
cpuimage/segan
|
[
"f4db80ef50de0490aea8f3526073a2a45ddc6d9d"
] |
[
"data_loader.py"
] |
[
"from __future__ import print_function\nimport tensorflow as tf\nfrom ops import *\nimport numpy as np\n\n\ndef pre_emph(x, coeff=0.95):\n x0 = tf.reshape(x[0], [1,])\n diff = x[1:] - coeff * x[:-1]\n concat = tf.concat([x0, diff], 0)\n return concat\n\ndef de_emph(y, coeff=0.95):\n if coeff <= 0:\n return y\n x = np.zeros(y.shape[0], dtype=np.float32)\n x[0] = y[0]\n for n in range(1, y.shape[0], 1):\n x[n] = coeff * x[n - 1] + y[n]\n return x\n\ndef read_and_decode(filename_queue, canvas_size, preemph=0.):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'wav_raw': tf.FixedLenFeature([], tf.string),\n 'noisy_raw': tf.FixedLenFeature([], tf.string),\n })\n wave = tf.decode_raw(features['wav_raw'], tf.int32)\n wave.set_shape(canvas_size)\n wave = (2./65535.) * tf.cast((wave - 32767), tf.float32) + 1.\n noisy = tf.decode_raw(features['noisy_raw'], tf.int32)\n noisy.set_shape(canvas_size)\n noisy = (2./65535.) * tf.cast((noisy - 32767), tf.float32) + 1.\n\n if preemph > 0:\n wave = tf.cast(pre_emph(wave, preemph), tf.float32)\n noisy = tf.cast(pre_emph(noisy, preemph), tf.float32)\n\n return wave, noisy\n"
] |
[
[
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.decode_raw",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.TFRecordReader",
"numpy.zeros"
]
] |
ischlag/Fast-Weight-Memory-public
|
[
"64c077f02ec320ec535cb66db3600453e1ef445f"
] |
[
"catbAbI/trainers/catbAbI_trainer.py"
] |
[
"\"\"\"\nImplements the trainer class that keeps track of the training state with some features:\n- saves best and last model after every evaluation\n- uses the log function to log terminal/text file\n- can restore the state of a training run and continue seamlessly\n- early stopping\n- saves last model whenever it logs (remove if model is big!)\n- saves best model whenever it evaluates\n- writes logs to tf.records but also csv file\n- keeps track of accuracies for individual tasks and both settings,\n LM: language modelling, RA: answer tokens only\n- saves the source folder\n\"\"\"\n\nimport os\nimport numpy as np\nimport torch\nimport copy\n\nfrom tensorboardX import SummaryWriter\nfrom munch import Munch, munchify\nfrom utils.lib import *\n\nBEST_MODEL_FILE_NAME = \"best_eval_state.pt\"\nLAST_MODEL_FILE_NAME = \"last_eval_state.pt\"\nTF_TRAIN_FOLDER_NAME = \"train\"\nTF_EVAL_FOLDER_NAME = \"valid\"\nCSV_FILE_NAME = \"exp_logging.csv\"\nCONFIG_FILE_NAME = \"exp_conf.pickle\"\nNECESSARY_PARAMS = [\n \"log_every_n_steps\",\n \"eval_every_n_steps\",\n \"device\",\n \"log_folder\",\n \"max_steps\", # -1 runs indefinitely\n \"write_logs\",\n \"early_stopping_steps\", # -1 ignores early stopping\n \"QM\", # id of ra-mode positions\n \"PAD\",\n \"ra_mode\", # identify losses for all y or just following the <ra> token\n]\nTRAIN_LABELS = [\n \"step\",\n \"loss\",\n \"mem_norm\",\n \"mem_abs_max\",\n \"ra_loss\",\n \"lm_loss\",\n \"regularizer\",\n \"ra_accuracy\",\n \"lm_accuracy\",\n \"batches_per_sec\",\n \"tokens_per_sec\",\n] + [\"ra_acc_task_{}\".format(i) for i in range(21)] \\\n + [\"lm_acc_task_{}\".format(i) for i in range(21)]\nEVAL_LABELS = [\n \"step\",\n \"loss\",\n \"mem_norm\",\n \"mem_abs_max\",\n \"ra_loss\",\n \"lm_loss\",\n \"regularizer\",\n \"ra_accuracy\",\n \"lm_accuracy\",\n \"batches_per_sec\",\n \"tokens_per_sec\",\n] + [\"ra_acc_task_{}\".format(i) for i in range(21)] \\\n + [\"lm_acc_task_{}\".format(i) for i in range(21)]\n\n\ndef load_default_params(p):\n p[\"log_every_n_steps\"] = 25\n p[\"eval_every_n_steps\"] = 250\n p[\"eval_steps\"] = -1\n p[\"log_folder\"] = \"logs/\"\n p[\"max_steps\"] = -1\n p[\"write_logs\"] = True\n p[\"early_stopping_steps\"] = -1\n p[\"lr_warmup\"] = -1\n\n\ndef get_string_description(p):\n return \"\"\n\n\nclass Trainer:\n def __init__(self, model, params, train_generator, eval_generator,\n optimizer, criterion, log):\n assert_entries_exist(params, NECESSARY_PARAMS)\n self.p = params\n self.model = model.to(self.p.device)\n self.optimizer = optimizer\n self.criterion = criterion\n self.train_generator = train_generator\n self.train_iterator = iter(train_generator)\n self.eval_generator = eval_generator\n self.log = log\n\n # captures a restorable state\n self.state = Munch()\n self.state.global_step = 0\n self.state.epochs = 0\n self.state.train_time = 0\n self.state.total_time = 0\n self.state.hidden_state = None\n\n self.state.best_eval_loss = float(\"inf\")\n self.state.best_eval_ra_loss = float(\"inf\")\n self.state.best_eval_lm_loss = float(\"inf\")\n self.state.best_eval_ra_acc = 0\n self.state.best_eval_lm_acc = 0\n self.state.best_train_time = 0\n self.state.best_total_time = 0\n self.state.best_step = 0\n self.state.best_hidden_state = None\n\n # state paths\n self.best_eval_state_path = os.path.join(self.p.log_folder,\n BEST_MODEL_FILE_NAME)\n self.last_eval_state_path = os.path.join(self.p.log_folder,\n LAST_MODEL_FILE_NAME)\n # event paths (tensorboard and csv)\n self.tb_train_path = os.path.join(self.p.log_folder, 
TF_TRAIN_FOLDER_NAME)\n self.tb_eval_path = os.path.join(self.p.log_folder, TF_EVAL_FOLDER_NAME)\n\n self.tf_train_writer = None\n self.tf_eval_writer = None\n self.csv_train_writer = None\n self.csv_eval_writer = None\n\n if self.p.write_logs:\n # create summary writer\n self.tf_train_writer = SummaryWriter(self.tb_train_path)\n self.tf_eval_writer = SummaryWriter(self.tb_eval_path)\n # create csv log file if nonexistent\n self.csv_train_writer = CsvWriter(column_names=TRAIN_LABELS,\n path=self.tb_train_path,\n file_name=CSV_FILE_NAME)\n self.csv_eval_writer = CsvWriter(column_names=EVAL_LABELS,\n path=self.tb_eval_path,\n file_name=CSV_FILE_NAME)\n # store sacred params\n save_config(self.p, self.p.log_folder, CONFIG_FILE_NAME)\n\n # continue training where the last state ended (if it exists)\n if os.path.exists(self.last_eval_state_path):\n self.log(\"Previous model found! Reloading last state.\")\n self.load_state(path=self.last_eval_state_path)\n\n def _forward(self, x, y, task_id, seq_len, hidden_state, voi):\n \"\"\" Compute a train/eval forward pass and update the variables\n of interest (VOI). \"\"\"\n # x: [batch_size, seq_length]\n # y: [batch_size, seq_length]\n # task_id: [batch_size, seq_length]\n # seq_len: [batch_size]\n\n # sort batches from longest to shortest for dynamic batch_size\n _, indecies = torch.sort(seq_len, descending=True)\n x = x[indecies]\n y = y[indecies]\n task_id = task_id[indecies]\n seq_len = seq_len[indecies]\n\n # move batch to accelerator\n x = x.to(self.p.device)\n y = y.to(self.p.device)\n task_id = task_id.to(self.p.device)\n seq_len = seq_len.to(self.p.device)\n\n # feed the model to compute the logits and loss\n logits, regularizer, hidden_state = self.model(x, seq_len, hidden_state)\n # logits: [batch_size, seq_len, vocab_size]\n # regularizer: [batch_size, 1] or None\n\n\n def compute_loss(logits, mask):\n if mask.sum().item() == 0:\n loss = torch.tensor(0).to(mask.device)\n else:\n loss = self.criterion(logits[mask], y[mask].view(-1))\n return loss\n\n ra_loss = compute_loss(logits, x == self.p.QM)\n lm_loss = compute_loss(logits, x != self.p.PAD)\n\n\n def compute_accs(logits, y, task_id, ra_pos):\n # y[ra_pos]: [batch_size * n_matches, 1]\n if ra_pos.sum().item() == 0:\n mean_accuracy = torch.tensor(0).long().to(ra_pos.device)\n n_per_task = torch.zeros(21).long().to(ra_pos.device)\n correct_per_task = torch.zeros(21).long().to(ra_pos.device)\n else:\n # compute VOIs\n accuracy = (torch.argmax(logits[ra_pos], dim=1) == y[ra_pos].view(-1)).int()\n # accuracy: [batch_size * n_matches]\n mean_accuracy = torch.mean(accuracy.float(), dim=0)\n # mean_accuracy: []\n\n # accuracy mask for individual tasks (with dim 0 for all tasks)\n # task_id: [batch_size * n_matches]\n mask = torch.stack([(i == task_id[ra_pos])\n for i in range(0, 21)], dim=1).int()\n # mask: [batch_size * n_matches, 21]\n # keep all for task 0\n mask[:, 0] = torch.ones(mask.shape[0])\n\n # number of elements per task\n n_per_task = mask.sum(dim=0)\n # n_per_task: [21]\n\n # compute the masked accuracy\n accuracy_masked = torch.stack([accuracy]*21, dim=1) * mask\n correct_per_task = accuracy_masked.sum(dim=0)\n # correct_per_task: [21]\n\n return mean_accuracy, n_per_task, correct_per_task\n\n _, ra_n_per_task, ra_correct_per_task = compute_accs(\n logits.cpu(), y.cpu(), task_id.cpu(), x.cpu() == self.p.QM)\n _, lm_n_per_task, lm_correct_per_task = compute_accs(\n logits.cpu(), y.cpu(), task_id.cpu(), x.cpu() != self.p.PAD)\n\n if self.p.ra_mode:\n loss = ra_loss\n else:\n loss = 
lm_loss\n\n if regularizer is not None:\n # average across batch dimension\n regularizer = torch.mean(regularizer, dim=0)\n voi.regularizer.append(regularizer.item())\n loss = loss + self.p.regularize * regularizer\n\n # Note: this might not be tracked by the model\n if hasattr(self.model, \"avg_mem\"):\n avg_mem = self.model.avg_mem\n mem_norm = avg_mem[0].norm()\n mem_abs_max = avg_mem[0].abs().max()\n else:\n mem_norm = torch.tensor([0])\n mem_abs_max = torch.tensor([0])\n\n token_count = seq_len.sum()\n\n # track VOIs\n voi.losses.append(loss.item())\n voi.mem_norms.append(mem_norm.item())\n voi.mem_abs_maxs.append(mem_abs_max.item())\n\n voi.ra_losses.append(ra_loss.item())\n voi.ra_n_per_task.append(ra_n_per_task.detach())\n voi.ra_correct_per_task.append(ra_correct_per_task.detach())\n\n voi.lm_losses.append(lm_loss.item())\n voi.lm_n_per_task.append(lm_n_per_task.detach())\n voi.lm_correct_per_task.append(lm_correct_per_task.detach())\n\n voi.token_counts.append(token_count.item())\n\n return loss, hidden_state\n\n def train(self):\n self.log(\"Starting train ...\")\n self.log(\"log_folder: {}\\n\".format(self.p.log_folder))\n self.model.train()\n self.optimizer.zero_grad()\n\n # variables of interest (don't forget to reset them after logging)\n train_voi = Munch()\n train_voi.losses = []\n train_voi.mem_norms = []\n train_voi.mem_abs_maxs = []\n\n train_voi.ra_losses = []\n train_voi.ra_accuracies = []\n train_voi.ra_n_per_task = []\n train_voi.ra_correct_per_task = []\n\n train_voi.lm_losses = []\n train_voi.lm_accuracies = []\n train_voi.lm_n_per_task = []\n train_voi.lm_correct_per_task = []\n\n train_voi.token_counts = []\n train_voi.batches = 0\n train_voi.regularizer = []\n\n # timers\n step_time = StopWatch(start=False) # forward pass time\n loading_time = StopWatch() # complement to step_time\n log_time = StopWatch() # time passed between logs\n train_time = StopWatch(start_with=self.state.train_time)\n total_time = StopWatch(start_with=self.state.total_time)\n\n while self.state.global_step < self.p.max_steps or self.p.max_steps == -1:\n # get next batch and reset iterator if epoch is over\n try:\n x, y, task_id, seq_len = next(self.train_iterator)\n # x: [batch_size, seq_length]\n # y, task_id, seq_len: [batch_size, 1]\n except StopIteration:\n self.state.epochs += 1\n self.train_iterator = iter(self.train_generator)\n continue\n\n loading_time.pause()\n step_time.start()\n\n # run forward\n curr_state = self.state.hidden_state\n loss, curr_state = self._forward(\n x, y, task_id, seq_len, curr_state, train_voi\n )\n self.state.hidden_state = traverse(curr_state, lambda t: t.detach())\n\n # stop taining is loss is nan\n if torch.isnan(loss):\n self.log(\"loss is nan. 
Exit train().\")\n return\n\n # update weights\n self.optimizer.zero_grad()\n if loss != 0:\n loss.backward()\n\n if self.p.lr_warmup > 0 and self.state.global_step < self.p.lr_warmup:\n new_lr = self.p.learning_rate * self.state.global_step / self.p.lr_warmup\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = new_lr\n\n self.optimizer.step()\n\n step_time.pause()\n loading_time.start()\n\n self.state.global_step += 1\n train_voi.batches += 1\n\n # log train summaries\n if self.state.global_step % self.p.log_every_n_steps == 0:\n # compute summaries\n ra_sum_correct = torch.stack(train_voi.ra_correct_per_task, dim=0).sum(dim=0)[0]\n ra_sum_targets = torch.stack(train_voi.ra_n_per_task, dim=0).sum(dim=0)[0]\n avg_ra_acc = ra_sum_correct.float() / ra_sum_targets.float()\n\n lm_sum_correct = torch.stack(train_voi.lm_correct_per_task, dim=0).sum(dim=0)[0]\n lm_sum_targets = torch.stack(train_voi.lm_n_per_task, dim=0).sum(dim=0)[0]\n avg_lm_acc = lm_sum_correct.float() / lm_sum_targets.float()\n\n avg_loss = np.mean(train_voi.losses)\n avg_mem_norm = np.mean(train_voi.mem_norms)\n avg_mem_abs_max = np.mean(train_voi.mem_abs_maxs)\n avg_regularizer = np.mean(train_voi.regularizer) \\\n if len(train_voi.regularizer) > 0 else 0\n avg_ra_loss = np.mean(train_voi.ra_losses)\n avg_lm_loss = np.mean(train_voi.lm_losses)\n sum_token_count = np.sum(train_voi.token_counts)\n avg_loading_time = loading_time.read() / train_voi.batches\n avg_step_time = step_time.read() / train_voi.batches\n\n secs_passed = log_time.read_and_reset()\n hours_passed = total_time.read() / (60.0 * 60.0)\n batches_per_sec = train_voi.batches / secs_passed\n tokens_per_sec = sum_token_count / secs_passed\n\n # keep track of train and total time\n self.state.train_time = train_time.read()\n self.state.total_time = total_time.read()\n\n # write terminal and file summaries\n vars = [\n (\"ep\", self.state.epochs, \"\"),\n (\"step\", self.state.global_step, \":4\"),\n (\"loss\", avg_loss, \":.5f\"),\n (\"reg\", avg_regularizer, \":.5f\"),\n (\"ra_acc\", avg_ra_acc * 100, \":5.2f\"),\n (\"lm_acc\", avg_lm_acc * 100, \":5.2f\"),\n (\"ra_loss\", avg_ra_loss, \":.5f\"),\n (\"lm_loss\", avg_lm_loss, \":.5f\"),\n (\"hours\", hours_passed, \":.2f\"),\n (\"b/s\", batches_per_sec, \":.2f\"),\n (\"t/s\", tokens_per_sec, \":5.0f\"),\n ]\n self.log(terminal_format(vars))\n\n # write tensorboard and csv summaries\n if self.p.write_logs:\n scalars = [self.state.global_step,\n avg_loss,\n avg_mem_norm,\n avg_mem_abs_max,\n avg_ra_loss,\n avg_lm_loss,\n avg_regularizer,\n avg_ra_acc,\n avg_lm_acc,\n batches_per_sec,\n tokens_per_sec]\n ra_correct_stack = torch.stack(train_voi.ra_correct_per_task).sum(0).float()\n ra_count_stack = torch.stack(train_voi.ra_n_per_task).sum(0).float()\n # ra_correct_stack, ra_count_stack: [21]\n\n ra_mask_nonzero = ra_count_stack > 0.0\n # ra_nonzero_batches: [21]\n\n ra_accs = torch.zeros_like(ra_correct_stack).float().to(ra_correct_stack.device)\n ra_accs[ra_mask_nonzero] = ra_correct_stack[ra_mask_nonzero] / ra_count_stack[ra_mask_nonzero]\n # ra_accs: [21]\n scalars += [acc.item() for acc in ra_accs]\n\n\n lm_correct_stack = torch.stack(train_voi.lm_correct_per_task).sum(0).float()\n lm_count_stack = torch.stack(train_voi.lm_n_per_task).sum(0).float()\n # lm_correct_stack, lm_count_stack: [21]\n\n lm_mask_nonzero = lm_count_stack > 0\n # lm_mask_nonzero: [21]\n\n lm_accs = torch.zeros_like(lm_count_stack).float().to(lm_correct_stack.device)\n lm_accs[lm_mask_nonzero] = 
lm_correct_stack[lm_mask_nonzero] / lm_count_stack[lm_mask_nonzero]\n # ra_accs: [21]\n scalars += [acc.item() for acc in lm_accs]\n\n tf_add_scalars(self.tf_train_writer, TRAIN_LABELS, scalars)\n self.csv_train_writer.write(scalars)\n # restarts mess a little with tensorboard, saving the state here\n # would help to deal with that but it is a slow down for big models.\n # self.save_state(target=self.last_eval_state_path)\n\n # clear\n log_time.start()\n step_time.read_and_reset()\n loading_time.read_and_reset()\n loading_time.start()\n train_voi.losses = []\n train_voi.mem_norms = []\n train_voi.mem_abs_maxs = []\n\n train_voi.ra_losses = []\n train_voi.ra_accuracies = []\n train_voi.ra_n_per_task = []\n train_voi.ra_correct_per_task = []\n\n train_voi.lm_losses = []\n train_voi.lm_accuracies = []\n train_voi.lm_n_per_task = []\n train_voi.lm_correct_per_task = []\n\n train_voi.token_counts = []\n train_voi.batches = 0\n train_voi.regularizer = []\n\n # run evaluation\n if self.state.global_step % self.p.eval_every_n_steps == 0:\n loading_time.pause()\n log_time.pause()\n train_time.pause()\n\n # build the initial hidden state for evaluation\n # replicate the hidden state of the first row in the train_batch\n pick_first = lambda t: t[[0]]\n stack_states = lambda t: torch.cat([t]*self.p.eval_batch_size, dim=0)\n h = copy.deepcopy(self.state.hidden_state)\n single_hidden_state = traverse(h, pick_first)\n eval_hidden_state = traverse(single_hidden_state, stack_states)\n\n self.evaluate(write_logs=self.p.write_logs,\n hidden_state=eval_hidden_state)\n self.model.train()\n\n # check for early stopping\n steps_without_progress = self.state.global_step - self.state.best_step\n if self.p.early_stopping_steps >= 0 and \\\n steps_without_progress > self.p.early_stopping_steps:\n self.log(\"No progress for {} steps\".format(steps_without_progress))\n self.log(\"Stopping training.\")\n return\n\n loading_time.start()\n log_time.start()\n train_time.start()\n\n def evaluate(self, generator=None, write_logs=False, hidden_state=None, progress=False):\n if generator is None:\n generator = self.eval_generator\n n_samples = len(generator)\n\n self.model.eval()\n\n # variables of interest\n eval_voi = Munch()\n eval_voi.losses = []\n eval_voi.mem_norms = []\n eval_voi.mem_abs_maxs = []\n\n eval_voi.ra_losses = []\n eval_voi.ra_accuracies = []\n eval_voi.ra_n_per_task = []\n eval_voi.ra_correct_per_task = []\n\n eval_voi.lm_losses = []\n eval_voi.lm_accuracies = []\n eval_voi.lm_n_per_task = []\n eval_voi.lm_correct_per_task = []\n\n eval_voi.token_counts = []\n eval_voi.batches = 0\n eval_voi.regularizer = []\n\n # timers\n step_time = StopWatch(start=False) # forward pass time\n loading_time = StopWatch() # complement to step_time\n eval_time = StopWatch()\n\n with torch.no_grad():\n counter = 0\n start_time = time.time()\n for x, y, task_id, seq_len in generator: \n\n # forward pass and track variables\n loading_time.pause()\n step_time.start()\n\n _, hidden_state = self._forward(x, y, task_id, seq_len,\n hidden_state, eval_voi)\n\n step_time.pause()\n loading_time.start()\n eval_voi.batches += 1\n\n # print progress\n if progress and counter % 100 == 0 and counter > 0:\n elapsed = time.time() - start_time\n speed = elapsed / counter\n remaining = (n_samples - counter) * speed / 60.\n print(\"{}/{} done. 
~{:.1f} mins remaining\".format(counter, n_samples, remaining))\n counter += 1\n\n if self.p.eval_steps > 0 and eval_voi.batches > self.p.eval_steps:\n break\n\n loading_time.pause()\n\n # compute summaries\n ra_sum_correct = torch.stack(eval_voi.ra_correct_per_task, dim=0).sum(dim=0)[0]\n ra_sum_targets = torch.stack(eval_voi.ra_n_per_task, dim=0).sum(dim=0)[0]\n avg_ra_acc = ra_sum_correct.float() / ra_sum_targets.float()\n\n lm_sum_correct = torch.stack(eval_voi.lm_correct_per_task, dim=0).sum(dim=0)[0]\n lm_sum_targets = torch.stack(eval_voi.lm_n_per_task, dim=0).sum(dim=0)[0]\n avg_lm_acc = lm_sum_correct.float() / lm_sum_targets.float()\n\n avg_loss = np.mean(eval_voi.losses)\n avg_mem_norm = np.mean(eval_voi.mem_norms)\n avg_mem_abs_max = np.mean(eval_voi.mem_abs_maxs)\n avg_ra_loss = np.mean(eval_voi.ra_losses)\n avg_lm_loss = np.mean(eval_voi.lm_losses)\n avg_regularizer = np.mean(eval_voi.regularizer) \\\n if len(eval_voi.regularizer) > 0 else 0\n sum_token_count = np.sum(eval_voi.token_counts)\n\n secs_passed = eval_time.read_and_reset()\n batches_per_sec = eval_voi.batches / secs_passed\n tokens_per_sec = sum_token_count / secs_passed\n\n # track best summaries so far and save state/model\n if avg_loss < self.state.best_eval_loss and write_logs:\n # new best model\n self.state.best_eval_loss = avg_loss\n self.state.best_eval_ra_loss = avg_ra_loss\n self.state.best_eval_lm_loss = avg_lm_loss\n self.state.best_eval_ra_acc = avg_ra_acc\n self.state.best_eval_lm_acc = avg_lm_acc\n self.state.best_train_time = self.state.train_time\n self.state.best_total_time = self.state.total_time\n self.state.best_step = self.state.global_step\n # save best state so far\n self.save_state(target=self.best_eval_state_path)\n\n # save current state\n if write_logs:\n self.save_state(target=self.last_eval_state_path)\n\n # write terminal and file summaries\n vars = [\n (\"eval\", \"\"),\n (\"loss\", avg_loss, \":.5f\"),\n (\"reg\", avg_regularizer, \":.5f\"),\n (\"ra_acc\", avg_ra_acc * 100, \":5.2f\"),\n (\"lm_acc\", avg_lm_acc * 100, \":5.2f\"),\n (\"ra_loss\", avg_ra_loss, \":.5f\"),\n (\"lm_loss\", avg_lm_loss, \":.5f\"),\n (\"b/s\", batches_per_sec, \":.2f\"),\n (\"t/s\", tokens_per_sec, \":5.0f\"),\n (\"| best:\", \"\"),\n (\"loss\", self.state.best_eval_loss, \":.5f\"),\n (\"ra_acc\", self.state.best_eval_ra_acc * 100, \":5.2f\"),\n (\"lm_acc\", self.state.best_eval_lm_acc * 100, \":5.2f\"),\n ]\n self.log(\"\")\n self.log(terminal_format(vars))\n # print folder path for easier identification of running experiments\n self.log(\"(\" + self.p.log_folder + \")\")\n self.log(\"\")\n\n # write tensorboard summaries\n if write_logs:\n scalars = [self.state.global_step,\n avg_loss,\n avg_mem_norm,\n avg_mem_abs_max,\n avg_ra_loss,\n avg_lm_loss,\n avg_regularizer,\n avg_ra_acc,\n avg_lm_acc,\n batches_per_sec,\n tokens_per_sec]\n\n ra_correct_stack = torch.stack(eval_voi.ra_correct_per_task).sum(0).float()\n ra_count_stack = torch.stack(eval_voi.ra_n_per_task).sum(0).float()\n # ra_correct_stack, ra_count_stack: [21]\n\n ra_mask_nonzero = ra_count_stack > 0.0\n # ra_nonzero_batches: [21]\n\n ra_accs = torch.zeros_like(ra_correct_stack).float().to(ra_correct_stack.device)\n ra_accs[ra_mask_nonzero] = ra_correct_stack[ra_mask_nonzero] / ra_count_stack[ra_mask_nonzero]\n # ra_accs: [21]\n scalars += [acc.item() for acc in ra_accs]\n\n\n lm_correct_stack = torch.stack(eval_voi.lm_correct_per_task).sum(0).float()\n lm_count_stack = torch.stack(eval_voi.lm_n_per_task).sum(0).float()\n # lm_correct_stack, 
lm_count_stack: [21]\n\n lm_mask_nonzero = lm_count_stack > 0\n # lm_mask_nonzero: [21]\n\n lm_accs = torch.zeros_like(lm_count_stack).float().to(lm_correct_stack.device)\n lm_accs[lm_mask_nonzero] = lm_correct_stack[lm_mask_nonzero] / lm_count_stack[lm_mask_nonzero]\n # ra_accs: [21]\n scalars += [acc.item() for acc in lm_accs]\n\n tf_add_scalars(self.tf_eval_writer, EVAL_LABELS, scalars)\n self.csv_eval_writer.write(scalars)\n\n\n def save_state(self, target):\n curr_state = {\n \"state\": self.state,\n \"model\": self.model.state_dict(),\n \"optimizer\": self.optimizer.state_dict()\n }\n torch.save(obj=curr_state, f=target)\n\n def load_state(self, path=None):\n if path is None:\n path = self.best_eval_state_path\n curr_state = torch.load(path)\n\n # lstm weight drop fix\n if self.p.model_name == \"lm_lstm\":\n for key in copy.deepcopy(curr_state[\"model\"]).keys():\n if \"old_module.W_hh\" in key[-15:]:\n del curr_state[\"model\"][key]\n\n self.model.load_state_dict(curr_state[\"model\"])\n self.optimizer.load_state_dict(curr_state[\"optimizer\"])\n self.state = munchify(curr_state[\"state\"])\n"
] |
[
[
"torch.mean",
"torch.ones",
"torch.isnan",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.argmax",
"torch.zeros_like",
"torch.tensor",
"numpy.mean",
"torch.sort",
"torch.no_grad",
"torch.stack",
"numpy.sum",
"torch.save"
]
] |
CPrescher/fabio
|
[
"9f3e8ec347d75d27dd5e11087555810a01864fae"
] |
[
"fabio/test/testnumpyimage.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Fable Input Output\n# https://github.com/silx-kit/fabio\n#\n# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nTest for numpy images.\n\"\"\"\n__author__ = \"Jérôme Kieffer\"\n__date__ = \"16/06/2016\"\nimport os\nimport sys\nimport unittest\nif __name__ == '__main__':\n import pkgutil\n __path__ = pkgutil.extend_path([os.path.dirname(__file__)], \"fabio.test\")\nfrom .utilstest import UtilsTest\nimport numpy\nlogger = UtilsTest.get_logger(__file__)\nfabio = sys.modules[\"fabio\"]\nfrom fabio.numpyimage import NumpyImage\nfrom fabio.openimage import openimage\n\n\nclass TestNumpy(unittest.TestCase):\n \"\"\"basic test\"\"\"\n\n def setUp(self):\n \"\"\"Generate files\"\"\"\n\n self.ary = numpy.random.randint(0, 6500, size=99).reshape(11, 9).astype(\"uint16\")\n self.fn = os.path.join(UtilsTest.tempdir, \"numpy.npy\")\n self.fn2 = os.path.join(UtilsTest.tempdir, \"numpy2.npy\")\n numpy.save(self.fn, self.ary)\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n for i in (self.fn, self.fn2):\n if os.path.exists(i):\n os.unlink(i)\n self.ary = self.fn = self.fn2 = None\n\n def test_read(self):\n \"\"\" check we can read pnm images\"\"\"\n obj = openimage(self.fn)\n\n self.assertEqual(obj.bytecode, numpy.uint16, msg=\"bytecode is OK\")\n self.assertEqual(9, obj.dim1, \"dim1\")\n self.assertEqual(11, obj.dim2, \"dim2\")\n self.assert_(numpy.allclose(obj.data, self.ary), \"data\")\n\n def test_write(self):\n \"\"\" check we can write numpy images\"\"\"\n ref = NumpyImage(data=self.ary)\n ref.save(self.fn2)\n obj = openimage(self.fn2)\n self.assertEqual(obj.bytecode, numpy.uint16, msg=\"bytecode is OK\")\n self.assertEqual(9, obj.dim1, \"dim1\")\n self.assertEqual(11, obj.dim2, \"dim2\")\n self.assert_(numpy.allclose(obj.data, self.ary), \"data\")\n\n def test_multidim(self):\n for shape in (10,), (10, 15), (10, 15, 20), (10, 15, 20, 25):\n ary = numpy.random.random(shape).astype(\"float32\")\n numpy.save(self.fn, ary)\n obj = openimage(self.fn)\n\n self.assertEqual(obj.bytecode, numpy.float32, msg=\"bytecode is OK\")\n self.assertEqual(shape[-1], obj.dim1, \"dim1\")\n dim2 = 1 if len(shape) == 1 else shape[-2]\n self.assertEqual(dim2, obj.dim2, \"dim2\")\n nframes = 1\n if len(shape) > 2:\n for i in shape[:-2]:\n nframes *= i\n self.assertEqual(nframes, obj.nframes, \"nframes\")\n if os.path.exists(self.fn):\n os.unlink(self.fn)\n\n\ndef suite():\n testsuite = unittest.TestSuite()\n testsuite.addTest(TestNumpy(\"test_read\"))\n testsuite.addTest(TestNumpy(\"test_write\"))\n testsuite.addTest(TestNumpy(\"test_multidim\"))\n return testsuite\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n"
] |
[
[
"numpy.random.random",
"numpy.allclose",
"numpy.save",
"numpy.random.randint"
]
] |
Jack12xl/taichi
|
[
"80eba2ee9f0a471f86f1786625ebda04bf3c7236"
] |
[
"tests/python/test_static.py"
] |
[
"import numpy as np\nimport pytest\n\nimport taichi as ti\n\n\[email protected]('val', [0, 1])\[email protected](ti.cpu)\ndef test_static_if(val):\n x = ti.field(ti.i32)\n\n ti.root.dense(ti.i, 1).place(x)\n\n @ti.kernel\n def static():\n if ti.static(val > 0.5):\n x[0] = 1\n else:\n x[0] = 0\n\n static()\n assert x[0] == val\n\n\[email protected](ti.cpu)\ndef test_static_if_error():\n x = ti.field(ti.i32)\n\n ti.root.dense(ti.i, 1).place(x)\n\n @ti.kernel\n def static(val: float):\n if ti.static(val > 0.5):\n x[0] = 1\n else:\n x[0] = 0\n\n with pytest.raises(ValueError, match='must be compile-time constants'):\n static(42)\n\n\[email protected]()\ndef test_static_ndrange():\n n = 3\n x = ti.Matrix.field(n, n, dtype=ti.f32, shape=(n, n))\n\n @ti.kernel\n def fill():\n w = [0, 1, 2]\n for i, j in ti.static(ti.ndrange(3, 3)):\n x[i, j][i, j] = w[i] + w[j] * 2\n\n fill()\n for i in range(3):\n for j in range(3):\n assert x[i, j][i, j] == i + j * 2\n\n\[email protected](ti.cpu)\ndef test_static_break():\n x = ti.field(ti.i32, 5)\n\n @ti.kernel\n def func():\n for i in ti.static(range(5)):\n x[i] = 1\n if ti.static(i == 2):\n break\n\n func()\n\n assert np.allclose(x.to_numpy(), np.array([1, 1, 1, 0, 0]))\n\n\[email protected](ti.cpu)\ndef test_static_continue():\n x = ti.field(ti.i32, 5)\n\n @ti.kernel\n def func():\n for i in ti.static(range(5)):\n if ti.static(i == 2):\n continue\n x[i] = 1\n\n func()\n\n assert np.allclose(x.to_numpy(), np.array([1, 1, 0, 1, 1]))\n"
] |
[
[
"numpy.array"
]
] |
Jacobjeevan/Reddit-Gild-Predictor
|
[
"4ef1ba76a51b48a4f37df04ddc059119a2ab5034"
] |
[
"src/data/preprocess.py"
] |
[
"import pandas as pd\nimport argparse\nfrom CommentData import CommentData\nfrom AuthorData import AuthorData\nfrom GildData import GildData\nfrom ThreadData import ThreadData\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom pathlib import Path\n\n\nclass preprocess:\n\n def __init__(self, args):\n self.initializeDataObjects()\n self.loadpath = \"../../data/raw/\"\n self.savepath = args.savepath\n if args.loadpath != \"../../data/raw/\":\n self.loadpath = args.loadpath\n self.setDifferentLoadpath()\n self.loadData()\n self.convertToPandas()\n self.MergedData = None\n\n def initializeDataObjects(self):\n self.threaddata = ThreadData()\n self.commentdata = CommentData()\n self.gilddata = GildData()\n self.authordata = AuthorData()\n \n def setDifferentLoadpath(self):\n self.threaddata.setpath(self.loadpath)\n self.commentdata.setpath(self.loadpath)\n self.gilddata.setpath(self.loadpath)\n self.authordata.setpath(self.loadpath)\n\n def loadData(self):\n self.threaddata.loadData()\n self.commentdata.loadData()\n self.gilddata.loadData()\n self.authordata.loadData()\n \n def convertToPandas(self):\n self.threaddata = pd.DataFrame(self.threaddata.getData())\n self.commentdata = pd.DataFrame(self.commentdata.getData())\n self.gilddata = pd.DataFrame(self.gilddata.getData())\n self.authordata = pd.DataFrame(self.authordata.getData())\n\n def process(self):\n self.handleErrorCases()\n self.threaddata.drop([\"author_ids\"], axis=1, inplace=True)\n self.mergeData()\n\n def handleErrorCases(self):\n self.authordata = self.dropDuplicatesAndEmptyRows(self.authordata, ['author_ids'])\n self.commentdata = self.dropDuplicatesAndEmptyRows(self.commentdata, ['comment_ids'])\n self.gilddata = self.dropDuplicatesAndEmptyRows(self.gilddata, ['comment_ids'])\n self.threaddata = self.dropDuplicatesAndEmptyRows(self.threaddata, ['thread_ids'])\n\n def dropDuplicatesAndEmptyRows(self, data, key_columns):\n df = data.copy()\n df.drop_duplicates(subset=key_columns, keep='last', inplace=True, ignore_index=True)\n return df.dropna()\n\n def mergeData(self):\n commentsAndThreads = self.mergeCommentsAndThreads()\n self.mergeWithGilds(commentsAndThreads)\n transformGilds = gildsToBinary()\n self.MergedData, self.targets = transformGilds.transform(self.MergedData)\n \n def mergeCommentsAndThreads(self):\n commentsAndThreads = self.commentdata.merge(self.threaddata, how=\"left\", on=\"thread_ids\", suffixes=(\"_comment\", \"_thread\"))\n commentsAndThreads.dropna(inplace=True)\n commentsAndThreads[\"comment_age\"] = commentsAndThreads[\"created_utc_comment\"] - commentsAndThreads[\"created_utc_thread\"]\n commentsAndThreads = commentsAndThreads.filter([\"comment_body\", \"ups\", \"comment_ids\", \"edited_comment\", \"upvotes\"\n , \"premium\", \"num_comments\", \"author_ids\", \"comment_age\"], axis=1)\n commentsAndThreads = commentsAndThreads.rename(columns={\"upvotes\":\"Thread_upvotes\", \"ups\":\"comment_upvotes\"})\n return commentsAndThreads\n\n def mergeWithGilds(self, commentsAndThreads):\n commentsThreadsGilds = self.gilddata.merge(commentsAndThreads, how='outer', on='comment_ids')\n self.MergedData = commentsThreadsGilds.merge(self.authordata, how='inner', on='author_ids')\n\n def splitAndSave(self):\n splits = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n for train_index, test_index in splits.split(self.MergedData, self.targets):\n pass\n trainSet = self.MergedData.iloc[train_index, :]\n testSet = 
self.MergedData.iloc[test_index, :]\n trainSet.reset_index(drop=True, inplace=True)\n testSet.reset_index(drop=True, inplace=True)\n self.saveJson(trainSet, \"trainSet_baseline\")\n self.saveJson(testSet, \"testSet_baseline\")\n\n def saveJson(self, df, filename):\n Path(self.savepath).mkdir(parents=True, exist_ok=True)\n filename = f\"{self.savepath}{filename}.json\"\n df.to_json(filename, orient=\"columns\")\n \nclass gildsToBinary(BaseEstimator, TransformerMixin):\n '''Using Sklearn's base transformer class to process the gildings column (convert the dictionary into binary)'''\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n df = X.copy()\n df[\"gildings\"].fillna(0, inplace=True)\n df[\"gildings\"] = df[\"gildings\"].apply(lambda x: 1 if x != 0 else 0)\n return df, df[\"gildings\"]\n\ndef build_parser():\n loadpath = \"../../data/raw/\"\n savepath = \"../../data/processed/\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-l\", \"--loadpath\",\n help=\"Load folder for raw files\", type=str, default=loadpath)\n parser.add_argument(\"-s\", \"--savepath\",\n help=\"Save folder for processed files\", type=str, default=savepath)\n return parser\n\n\ndef main():\n parser = build_parser()\n args = parser.parse_args()\n Preprocess = preprocess(args)\n Preprocess.process()\n Preprocess.splitAndSave()\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"sklearn.model_selection.StratifiedShuffleSplit"
]
] |
xingmimfl/pytorch_LSGAN
|
[
"a744cd05c925111e807613f7fb4ed42cd99589c6"
] |
[
"models/lsgan.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\n\nclass LSGAN_D(nn.Module):\n def __init__(self):\n super(LSGAN_D, self).__init__()\n layers = [\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=2,bias=False),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.2),\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2,bias=False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2,bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2,bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2),\n ]\n self.main = nn.Sequential(*layers)\n self.linear = nn.Linear(512*6*6, 1)\n\n def forward(self, input):\n output = self.main(input)\n output = output.view(output.size(0), -1)\n output = self.linear(output)\n return output\n\nclass LSGAN_G(nn.Module):\n def __init__(self):\n super(LSGAN_G, self).__init__()\n layers = [\n nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=3, stride=1,bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=64, out_channels=3, kernel_size=3, stride=1, bias=False),\n ]\n self.main = nn.Sequential(*layers)\n\n self.linear = nn.Sequential(\n nn.Linear(1024, 7*7*256),\n nn.BatchNorm2d(7*7*256),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, input):\n output = self.linear(input)\n output = output.view(-1,256,7,7)\n output = self.main(output)\n return output \n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
BkyuChoi/HpicFlatland
|
[
"bdbba7ce451eb72dc760993b96cec4772a08983c"
] |
[
"Test6multi.py"
] |
[
"import os\nimport random\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom pprint import pprint\n\nfrom flatland.utils.rendertools import RenderTool\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch\n\nfrom flatland.envs.rail_env import RailEnv, RailEnvActions\nfrom flatland.envs.rail_generators import sparse_rail_generator\nfrom flatland.envs.schedule_generators import sparse_schedule_generator\nfrom flatland.envs.observations import TreeObsForRailEnv\n\nfrom flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters\nfrom flatland.envs.predictions import ShortestPathPredictorForRailEnv\n\nbase_dir = Path(__file__).resolve().parent.parent\nsys.path.append(str(base_dir))\n\nfrom observation_utils import normalize_observation\n\nfrom timer import Timer\nfrom dddqn_policy import DDDQNPolicy\n\nfrom flatland.envs.rail_env_utils import load_flatland_environment_from_file\n\ntry:\n import wandb\n\n wandb.init(sync_tensorboard=True)\nexcept ImportError:\n print(\"Install wandb to log to Weights & Biases\")\n\n\"\"\"\nThis file shows how to train multiple agents using a reinforcement learning approach.\n\nDocumentation: https://flatland.aicrowd.com/getting-started/rl/multi-agent.html\nResults: https://app.wandb.ai/masterscrat/flatland-examples-reinforcement_learning/reports/Flatland-Examples--VmlldzoxNDI2MTA\n\"\"\"\n\nSUPPRESS_OUTPUT = False\n\nif SUPPRESS_OUTPUT:\n # ugly hack to be able to run hyperparameters sweeps with w&b\n # they currently have a bug which prevents runs that output emojis to run :(\n def print(*args, **kwargs):\n pass\n\ndef train_agent(train_params):\n\n env = load_flatland_environment_from_file(\"scratch/test-envs/Test_6/Level_0.pkl\")\n env.reset(regenerate_schedule=True, regenerate_rail=True )\n # Environment parameters\n n_agents = len(env.agents)\n print(\"n_agents= \", n_agents)\n print(\"env.get_num_agents(): \",env.get_num_agents())\n x_dim = env.width\n y_dim = env.height\n n_cities = 9 # 직접 설정해줄때 Test2환경 기준 3\n #max_rails_between_cities = env_params.max_rails_between_cities\n #max_rails_in_city = env_params.max_rails_in_city\n seed = 2125\n\n # Observation parameters\n # observation_tree_depth = env_params.observation_tree_depth\n # observation_radius = env_params.observation_radius\n # observation_max_path_depth = env_params.observation_max_path_depth\n observation_tree_depth = 2\n observation_radius = 10\n observation_max_path_depth = 30\n\n # Training parameters\n eps_start = train_params.eps_start\n eps_end = train_params.eps_end\n eps_decay = train_params.eps_decay\n n_episodes = train_params.n_episodes\n checkpoint_interval = train_params.checkpoint_interval\n n_eval_episodes = train_params.n_evaluation_episodes\n\n # Set the seeds\n random.seed(seed)\n np.random.seed(seed)\n\n # Break agents from time to time\n malfunction_parameters = MalfunctionParameters(\n malfunction_rate=1. / 10000, # Rate of malfunctions\n min_duration=15, # Minimal duration\n max_duration=50 # Max duration\n )\n\n # Observation builder\n predictor = ShortestPathPredictorForRailEnv(observation_max_path_depth)\n tree_observation = TreeObsForRailEnv(max_depth=observation_tree_depth, predictor=predictor)\n\n # Fraction of train which each speed\n speed_profiles = {\n 1.: 1.0, # Fast passenger train\n 1. / 2.: 0.0, # Fast freight train\n 1. / 3.: 0.0, # Slow commuter train\n 1. 
/ 4.: 0.0 # Slow freight train\n }\n\n # Setup the environment\n # env = RailEnv(\n # width=x_dim,\n # height=y_dim,\n # rail_generator=sparse_rail_generator(\n # max_num_cities=n_cities,\n # grid_mode=False,\n # max_rails_between_cities=max_rails_between_cities,\n # max_rails_in_city=max_rails_in_city\n # ),\n # schedule_generator=sparse_schedule_generator(speed_profiles),\n # number_of_agents=n_agents,\n # malfunction_generator_and_process_data=malfunction_from_params(malfunction_parameters),\n # obs_builder_object=tree_observation,\n # random_seed=seed\n # \n\n # env.reset(regenerate_schedule=True, regenerate_rail=True)\n \n # Setup renderer\n if train_params.render:\n env_renderer = RenderTool(env, gl=\"PGL\")\n\n # Calculate the state size given the depth of the tree observation and the number of features\n n_features_per_node = env.obs_builder.observation_dim\n n_nodes = 0\n for i in range(observation_tree_depth + 1):\n n_nodes += np.power(4, i)\n state_size = n_features_per_node * n_nodes\n\n # The action space of flatland is 5 discrete actions\n action_size = 5\n\n # Max number of steps per episode\n # This is the official formula used during evaluations\n # See details in flatland.envs.schedule_generators.sparse_schedule_generator\n max_steps = int(4 * 2 * (env.height + env.width + (n_agents / n_cities)))\n #max_steps = env._max_episode_steps\n print(\"max_steps = \", max_steps)\n print(\"env._max_episode_steps= \",env._max_episode_steps)\n\n action_count = [0] * action_size\n action_dict = dict()\n agent_obs = [None] * env.get_num_agents()\n agent_prev_obs = [None] * env.get_num_agents()\n agent_prev_action = [2] * env.get_num_agents()\n update_values = False\n smoothed_normalized_score = -1.0\n smoothed_eval_normalized_score = -1.0\n smoothed_completion = 0.0\n smoothed_eval_completion = 0.0\n\n # Double Dueling DQN policy\n policy = DDDQNPolicy(state_size, action_size, train_params)\n\n # TensorBoard writer\n writer = SummaryWriter()\n writer.add_hparams(vars(train_params), {})\n #writer.add_hparams(vars(env_params), {})\n\n training_timer = Timer()\n training_timer.start()\n\n print(\"\\n🚉 Training {} trains on {}x{} grid for {} episodes, evaluating on {} episodes every {} episodes.\\n\"\n .format(env.get_num_agents(), x_dim, y_dim, n_episodes, n_eval_episodes, checkpoint_interval))\n\n model = policy.qnetwork_local\n optimizer = policy.optimizer\n\n # checkpoint = torch.load('./checkpoints/test_multi-700.pth')\n # model.load_state_dict(checkpoint['model_state_dict'])\n # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n # checkepisode_idx = checkpoint['episode_idx']\n # eps_start = checkpoint['eps_start']\n # smoothed_completion = checkpoint['Avg']\n # print(checkepisode_idx)\n for episode_idx in range(n_episodes+1):\n #for episode_idx in range(checkepisode_idx,(n_episodes + 1)):\n # Timers\n step_timer = Timer()\n reset_timer = Timer()\n learn_timer = Timer()\n preproc_timer = Timer()\n\n # Reset environment\n reset_timer.start()\n obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)\n reset_timer.end()\n\n if train_params.render:\n env_renderer.set_new_rail()\n\n score = 0\n nb_steps = 0\n actions_taken = []\n\n # Build agent specific observations\n for agent in env.get_agent_handles():\n if obs[agent]:\n agent_obs[agent] = normalize_observation(obs[agent], observation_tree_depth, observation_radius=observation_radius)\n agent_prev_obs[agent] = agent_obs[agent].copy()\n\n # Run episode\n for step in range(max_steps - 1):\n for agent in 
env.get_agent_handles():\n if info['action_required'][agent]:\n # If an action is required, we want to store the obs at that step as well as the action\n update_values = True\n action = policy.act(agent_obs[agent], eps=eps_start)\n action_count[action] += 1\n actions_taken.append(action)\n else:\n update_values = False\n action = 0\n action_dict.update({agent: action})\n\n # Environment step\n step_timer.start()\n next_obs, all_rewards, done, info = env.step(action_dict)\n step_timer.end()\n\n if train_params.render and episode_idx % checkpoint_interval == 0:\n env_renderer.render_env(\n show=True,\n frames=False,\n show_observations=False,\n show_predictions=False\n )\n\n for agent in range(env.get_num_agents()):\n # Update replay buffer and train agent\n # Only update the values when we are done or when an action was taken and thus relevant information is present\n if update_values or done[agent]:\n learn_timer.start()\n policy.step(agent_prev_obs[agent], agent_prev_action[agent], all_rewards[agent], agent_obs[agent], done[agent])\n learn_timer.end()\n\n agent_prev_obs[agent] = agent_obs[agent].copy()\n agent_prev_action[agent] = action_dict[agent]\n\n # Preprocess the new observations\n if next_obs[agent]:\n preproc_timer.start()\n agent_obs[agent] = normalize_observation(next_obs[agent], observation_tree_depth, observation_radius=observation_radius)\n preproc_timer.end()\n\n score += all_rewards[agent]\n\n nb_steps = step\n\n if done['__all__']:\n break\n\n # Epsilon decay\n eps_start = max(eps_end, eps_decay * eps_start)\n\n # Collection information about training\n tasks_finished = sum(done[idx] for idx in env.get_agent_handles())\n completion = tasks_finished / max(1, env.get_num_agents())\n normalized_score = score / (max_steps * env.get_num_agents())\n action_probs = action_count / np.sum(action_count)\n action_count = [1] * action_size\n\n # Smoothed values for terminal display and for more stable hyper-parameter tuning\n smoothing = 0.99\n smoothed_normalized_score = smoothed_normalized_score * smoothing + normalized_score * (1.0 - smoothing)\n smoothed_completion = smoothed_completion * smoothing + completion * (1.0 - smoothing)\n\n # Print logs\n if episode_idx % checkpoint_interval == 0:\n #save_checkpoint(episode_idx, policy.qnetwork_local, policy.qnetwork_local.optimizer, './checkpoints/test'+str(episode_idx)+'.pth')\n #torch.save(policy.qnetwork_local, './checkpoints/obs2_multi-' + str(episode_idx) + '.pth')\n torch.save(\n {'episode_idx':episode_idx,\n 'model_state_dict':policy.qnetwork_local.state_dict(),\n 'optimizer_state_dict':policy.optimizer.state_dict(),\n 'eps_start':eps_start,\n 'Avg':smoothed_completion\n #'loss': policy.qnetwork_local.loss\n }, './checkpoints/test6_multi-' + str(episode_idx) + '.pth')\n if train_params.render:\n env_renderer.close_window()\n\n print(\n '\\r🚂 Episode {}'\n '\\t 🏆 Score: {:.3f}'\n ' Avg: {:.3f}'\n '\\t 💯 Done: {:.2f}%'\n ' Avg: {:.2f}%'\n '\\t 🎲 Epsilon: {:.2f} '\n '\\t 🔀 Action Probs: {}'.format(\n episode_idx,\n normalized_score,\n smoothed_normalized_score,\n 100 * completion,\n 100 * smoothed_completion,\n eps_start,\n format_action_prob(action_probs)\n ), end=\" \")\n\n # Evaluate policy\n if episode_idx % train_params.checkpoint_interval == 0:\n scores, completions, nb_steps_eval = eval_policy(env, policy, n_eval_episodes, max_steps)\n writer.add_scalar(\"evaluation/scores_min\", np.min(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_max\", np.max(scores), episode_idx)\n 
writer.add_scalar(\"evaluation/scores_mean\", np.mean(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_std\", np.std(scores), episode_idx)\n writer.add_histogram(\"evaluation/scores\", np.array(scores), episode_idx)\n writer.add_scalar(\"evaluation/completions_min\", np.min(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_max\", np.max(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_mean\", np.mean(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_std\", np.std(completions), episode_idx)\n writer.add_histogram(\"evaluation/completions\", np.array(completions), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_min\", np.min(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_max\", np.max(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_mean\", np.mean(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_std\", np.std(nb_steps_eval), episode_idx)\n writer.add_histogram(\"evaluation/nb_steps\", np.array(nb_steps_eval), episode_idx)\n\n smoothing = 0.9\n smoothed_eval_normalized_score = smoothed_eval_normalized_score * smoothing + np.mean(scores) * (1.0 - smoothing)\n smoothed_eval_completion = smoothed_eval_completion * smoothing + np.mean(completions) * (1.0 - smoothing)\n writer.add_scalar(\"evaluation/smoothed_score\", smoothed_eval_normalized_score, episode_idx)\n writer.add_scalar(\"evaluation/smoothed_completion\", smoothed_eval_completion, episode_idx)\n\n # Save logs to tensorboard\n writer.add_scalar(\"training/score\", normalized_score, episode_idx)\n writer.add_scalar(\"training/smoothed_score\", smoothed_normalized_score, episode_idx)\n writer.add_scalar(\"training/completion\", np.mean(completion), episode_idx)\n writer.add_scalar(\"training/smoothed_completion\", np.mean(smoothed_completion), episode_idx)\n writer.add_scalar(\"training/nb_steps\", nb_steps, episode_idx)\n writer.add_histogram(\"actions/distribution\", np.array(actions_taken), episode_idx)\n writer.add_scalar(\"actions/nothing\", action_probs[RailEnvActions.DO_NOTHING], episode_idx)\n writer.add_scalar(\"actions/left\", action_probs[RailEnvActions.MOVE_LEFT], episode_idx)\n writer.add_scalar(\"actions/forward\", action_probs[RailEnvActions.MOVE_FORWARD], episode_idx)\n writer.add_scalar(\"actions/right\", action_probs[RailEnvActions.MOVE_RIGHT], episode_idx)\n writer.add_scalar(\"actions/stop\", action_probs[RailEnvActions.STOP_MOVING], episode_idx)\n writer.add_scalar(\"training/epsilon\", eps_start, episode_idx)\n writer.add_scalar(\"training/buffer_size\", len(policy.memory), episode_idx)\n writer.add_scalar(\"training/loss\", policy.loss, episode_idx)\n writer.add_scalar(\"timer/reset\", reset_timer.get(), episode_idx)\n writer.add_scalar(\"timer/step\", step_timer.get(), episode_idx)\n writer.add_scalar(\"timer/learn\", learn_timer.get(), episode_idx)\n writer.add_scalar(\"timer/preproc\", preproc_timer.get(), episode_idx)\n writer.add_scalar(\"timer/total\", training_timer.get_current(), episode_idx)\n\n\ndef format_action_prob(action_probs):\n action_probs = np.round(action_probs, 3)\n actions = [\"↻\", \"←\", \"↑\", \"→\", \"◼\"]\n\n buffer = \"\"\n for action, action_prob in zip(actions, action_probs):\n buffer += action + \" \" + \"{:.3f}\".format(action_prob) + \" \"\n\n return buffer\n\n\ndef eval_policy(env, policy, n_eval_episodes, max_steps):\n action_dict = dict()\n scores = []\n completions = []\n nb_steps = []\n\n for episode_idx in 
range(n_eval_episodes):\n agent_obs = [None] * env.get_num_agents()\n score = 0.0\n\n obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)\n\n final_step = 0\n\n for step in range(max_steps - 1):\n for agent in env.get_agent_handles():\n if obs[agent]:\n # TODO pass parameters properly\n # agent_obs[agent] = normalize_observation(obs[agent], tree_depth=2, observation_radius=10)\n agent_obs[agent] = normalize_observation(obs[agent], tree_depth=2, observation_radius=10)\n\n action = 0\n if info['action_required'][agent]:\n action = policy.act(agent_obs[agent], eps=0.0)\n action_dict.update({agent: action})\n\n obs, all_rewards, done, info = env.step(action_dict)\n\n for agent in env.get_agent_handles():\n score += all_rewards[agent]\n\n final_step = step\n\n if done['__all__']:\n break\n\n normalized_score = score / (max_steps * env.get_num_agents())\n scores.append(normalized_score)\n\n tasks_finished = sum(done[idx] for idx in env.get_agent_handles())\n completion = tasks_finished / max(1, env.get_num_agents())\n completions.append(completion)\n\n nb_steps.append(final_step)\n\n print(\"\\t✅ Eval: score {:.3f} done {:.1f}%\".format(np.mean(scores), np.mean(completions) * 100.0))\n\n return scores, completions, nb_steps\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-n\", \"--n_episodes\", dest=\"n_episodes\", help=\"number of episodes to run\", default=2500, type=int)\n parser.add_argument(\"--n_evaluation_episodes\", dest=\"n_evaluation_episodes\", help=\"number of evaluation episodes\", default=25, type=int)\n parser.add_argument(\"--checkpoint_interval\", dest=\"checkpoint_interval\", help=\"checkpoint interval\", default=100, type=int)\n parser.add_argument(\"--eps_start\", dest=\"eps_start\", help=\"max exploration\", default=1.0, type=float)\n parser.add_argument(\"--eps_end\", dest=\"eps_end\", help=\"min exploration\", default=0.01, type=float)\n parser.add_argument(\"--eps_decay\", dest=\"eps_decay\", help=\"exploration decay\", default=0.998, type=float)\n parser.add_argument(\"--buffer_size\", dest=\"buffer_size\", help=\"replay buffer size\", default=int(1e6), type=int)\n parser.add_argument(\"--buffer_min_size\", dest=\"buffer_min_size\", help=\"min buffer size to start training\", default=0, type=int)\n parser.add_argument(\"--batch_size\", dest=\"batch_size\", help=\"minibatch size\", default=32, type=int)\n parser.add_argument(\"--gamma\", dest=\"gamma\", help=\"discount factor\", default=0.99, type=float)\n parser.add_argument(\"--tau\", dest=\"tau\", help=\"soft update of target parameters\", default=1e-3, type=float)\n parser.add_argument(\"--learning_rate\", dest=\"learning_rate\", help=\"learning rate\", default=0.52e-4, type=float)\n parser.add_argument(\"--hidden_size\", dest=\"hidden_size\", help=\"hidden size (2 fc layers)\", default=256, type=int)\n parser.add_argument(\"--update_every\", dest=\"update_every\", help=\"how often to update the network\", default=8, type=int)\n parser.add_argument(\"--use_gpu\", dest=\"use_gpu\", help=\"use GPU if available\", default=True, type=bool)\n parser.add_argument(\"--num_threads\", dest=\"num_threads\", help=\"number of threads to use\", default=1, type=int)\n parser.add_argument(\"--render\", dest=\"render\", help=\"render 1 episode in 100\", default=False, type=bool)\n training_parameters = parser.parse_args()\n\n print(\"\\nTraining parameters:\")\n pprint(vars(training_parameters))\n\n # environment_parameters = {\n # # small_v0 config\n # \"n_agents\": 5,\n # 
\"x_dim\": 35,\n # \"y_dim\": 35,\n # \"n_cities\": 4,\n # \"max_rails_between_cities\": 2,\n # \"max_rails_in_city\": 3,\n\n # \"seed\": 42,\n # \"observation_tree_depth\": 2,\n # \"observation_radius\": 10,\n # \"observation_max_path_depth\": 30\n # }\n\n # print(\"\\nEnvironment parameters:\")\n # pprint(environment_parameters)\n\n # environment_parameters = {\n # # small_v0 config\n # \"n_agents\": len(env.agents),\n # \"x_dim\": env.width,\n # \"y_dim\": env.height,\n # \"n_cities\": env.n_cities,\n # \"max_rails_between_cities\": env.max_rails_between_cities,\n # \"max_rails_in_city\": env.max_rails_in_city,\n\n # \"seed\": 42,\n # \"observation_tree_depth\": 2,\n # \"observation_radius\": 10,\n # \"observation_max_path_depth\": 30\n # }\n\n os.environ[\"OMP_NUM_THREADS\"] = str(training_parameters.num_threads)\n train_agent( training_parameters)"
] |
[
[
"numpy.random.seed",
"numpy.power",
"numpy.min",
"numpy.round",
"numpy.max",
"numpy.std",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"numpy.array",
"numpy.sum"
]
] |
s0phia-/ipse
|
[
"a2113762c63086d8d1614abb10ed9f898aef35f0",
"a2113762c63086d8d1614abb10ed9f898aef35f0"
] |
[
"agents/NN_EW.py",
"agents/LSPI.py"
] |
[
"import numpy as np\nimport random\nimport math\nfrom agents.stew.utils import create_diff_matrix\n\n\nclass EwDefaultDict(dict):\n\n def __missing__(self, key):\n return create_diff_matrix(key)\n\n\nclass NeuralNetwork:\n def __init__(self, x_size, y_size):\n self.x = x # input\n self.y = y # target\n self.weights_layer_1 = np.random.rand(self.x.shape[1], 12) # weights input to hidden_1\n self.hidden_1 = None # hidden layer 1\n self.weights_layer_2 = np.random.rand(12, self.y.shape) # weights hidden_1 to output\n self.y_hat = np.zeros(self.y.shape) # output layer\n\n @staticmethod\n def sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\n @staticmethod\n def d_sigmoid(x):\n return x * (1 - x)\n\n def feedforward(self, activation):\n self.hidden_1 = activation(np.dot(self.x, self.weights_layer_1))\n self.y_hat = activation(np.dot(self.hidden_1, self.weights_layer_2))\n\n @staticmethod\n def derivative_ew(x):\n return np.dot(EwDefaultDict[x.shape[0]], x)\n\n def backprop(self, d_activation):\n \"\"\"\n loss function: 1/2||y-y_hat||^2_2 + lam*D*w\n where D the equal weight regularisation matrix\n Regularises across layers\n \"\"\"\n err = self.y - self.y_hat\n z1 = np.dot(self.weights_layer_1, self.x)\n z2 = np.dot(self.weights_layer_2, self.hidden_1)\n\n d_weights_layer_2 = np.dot(self.hidden_1.transpose, err * d_activation(z2)) +\\\n self.derivative_ew(self.weights_layer_2)\n d_weights_layer_1 = np.dot(self.x.transpose,\n np.dot(err * d_activation(z2), self.weights_layer_2.transpose)\n * d_activation(z1))\n self.weights_layer_1 += d_weights_layer_1\n self.weights_layer_2 += d_weights_layer_2\n\n\nclass DqnEwAgent:\n \"\"\"\n A DQN style agent with equal weights regularisation.\n \"\"\"\n\n def __init__(self, num_features, actions, regularisation_strength, exploration=.15):\n self.num_actions = actions.n\n self.num_features = num_features\n self.lam = regularisation_strength\n self.epsilon = exploration\n self.actions = actions\n self.learning_rate = 0.01\n self.gamma = 0.9 # discount factor\n self.batch_size = 32\n self.buffer_size = 1e5\n self.nn = NeuralNetwork()\n\n def epsilon_greedy(self, state):\n if random.uniform(0, 1) < self.epsilon:\n action = self.actions.sample()\n else:\n action = self.get_highest_q_action(state)[0]\n return action\n\n def learn(self):\n pass\n\n def run(self):\n pass\n\n def get_highest_q_action(self, state):\n pass\n",
"import numpy as np\nimport random\nimport time\nfrom agents.stew.utils import create_diff_matrix\nfrom utils import random_tiebreak_argmax\n\n\nclass Lstdq:\n \"\"\"\n Least squares temporal difference Q learning\n \"\"\"\n def __init__(self, num_features, num_actions, policy, source_of_samples, regularisation_strength=None):\n self.num_features = num_features\n self.num_actions = num_actions\n self.lam = regularisation_strength\n self.gamma = .95 # discount factor\n self.matrix_A = np.zeros([self.num_features*self.num_actions, self.num_features*self.num_actions])\n self.vector_b = np.zeros([self.num_features * self.num_actions])\n self.samples = source_of_samples # called D in LSPI paper\n self.policy_matrix = policy\n\n def apply_bf(self, state, action):\n sa_bf = np.zeros([self.num_actions, self.num_features])\n sa_bf[action] = state\n return sa_bf.flatten()\n\n def greedy_policy(self, state):\n q_values = np.matmul(self.policy_matrix, state)\n return random_tiebreak_argmax(q_values)\n\n def find_a_and_b(self):\n for d_i in self.samples:\n state, action, reward, state_prime = d_i\n state_action = self.apply_bf(state, action)\n state_action_prime = self.apply_bf(state_prime, self.greedy_policy(state_prime))\n x = state_action - self.gamma * state_action_prime\n self.matrix_A += np.matmul(state_action.reshape([state_action.shape[0], 1]), x.reshape([1, x.shape[0]]))\n self.vector_b += state_action * reward\n\n def fit(self):\n self.find_a_and_b()\n policy = np.matmul(np.linalg.inv(self.matrix_A), self.vector_b)\n return policy.reshape([self.num_actions, self.num_features])\n\n\nclass LstdqEw(Lstdq):\n \"\"\"\n Least squares temporal difference Q learning, regularised with equal weights\n \"\"\"\n def __init__(self, num_features, num_actions, policy, source_of_samples, regularisation_strength):\n super().__init__(num_features, num_actions, policy, source_of_samples, regularisation_strength)\n self.matrix_DtD = create_diff_matrix(num_features*num_actions) # ew regularisation matrix DtD\n\n def fit(self):\n self.find_a_and_b()\n policy = np.matmul(np.linalg.inv(self.matrix_A + self.lam*self.matrix_DtD), self.vector_b)\n return policy.reshape([self.num_actions, self.num_features])\n\n\nclass LstdqL2(Lstdq):\n \"\"\"\n Least squares temporal difference Q learning, regularised with equal weights\n \"\"\"\n def __init__(self, num_features, num_actions, policy, source_of_samples, regularisation_strength):\n super().__init__(num_features, num_actions, policy, source_of_samples, regularisation_strength)\n self.matrix_id = np.eye(num_features * num_actions)\n\n def fit(self):\n self.find_a_and_b()\n policy = np.matmul(np.linalg.inv(self.matrix_A + self.lam*self.matrix_id), self.vector_b)\n return policy.reshape([self.num_actions, self.num_features])\n\n\nclass LspiAgent:\n def __init__(self, num_features, actions, regularisation_strength, max_samples=10**6, source_of_samples=[]):\n self.source_of_samples = source_of_samples\n self.max_samples = max_samples\n self.num_features = num_features\n self.num_actions = actions.n\n self.reg_strength = regularisation_strength\n self.policy = np.zeros([self.num_actions, self.num_features])\n self.epsilon = 1 # TODO\n self.action_space = actions\n self.model = Lstdq\n\n def epsilon_greedy(self, state):\n if random.uniform(0, 1) < self.epsilon:\n action = self.action_space.sample()\n else:\n q_values = np.matmul(self.policy, state)\n action = random_tiebreak_argmax(q_values)\n return action\n\n def learn(self, stopping_criteria, max_out=100000):\n diff = 
stopping_criteria + 1\n i = 1\n for i in range(max_out):\n if diff > stopping_criteria:\n w_old = self.policy\n agent = self.model(self.num_features, self.num_actions, w_old, self.source_of_samples,\n self.reg_strength)\n w_new = agent.fit()\n self.policy = w_new\n diff = np.linalg.norm(w_old-w_new)\n i += 1\n else:\n break\n return w_new\n\n def collect_experience(self, episodes, env, max_episode_length, sleep_time):\n new_samples = []\n for i in range(episodes):\n env.reset()\n state = env.state_features\n for _ in range(max_episode_length):\n time.sleep(sleep_time)\n action = self.epsilon_greedy(state)\n _, reward, done, info = env.step(action)\n state_prime = info[\"state_features\"]\n new_samples.append([state, action, reward, state_prime])\n state = state_prime\n if done or len(new_samples) >= self.max_samples:\n break\n self.source_of_samples += new_samples\n if len(self.source_of_samples) >= self.max_samples: # if too many samples, only keep last N\n self.source_of_samples = self.source_of_samples[-self.max_samples:]\n\n def run(self, episodes, env, stopping_criteria, max_episode_length=200, sleep_time=0):\n self.collect_experience(episodes=episodes, env=env, max_episode_length=max_episode_length,\n sleep_time=sleep_time)\n self.learn(stopping_criteria=stopping_criteria)\n\n\nclass LspiAgentEw(LspiAgent):\n def __init__(self, num_features, actions, regularisation_strength, max_samples=10**5, source_of_samples=[]):\n super().__init__(num_features, actions, regularisation_strength, max_samples, source_of_samples)\n self.model = LstdqEw\n\n\nclass LspiAgentL2(LspiAgent):\n def __init__(self, num_features, actions, regularisation_strength, max_samples=10**5, source_of_samples=[]):\n super().__init__(num_features, actions, regularisation_strength, max_samples, source_of_samples)\n self.model = LstdqL2\n"
] |
[
[
"numpy.dot",
"numpy.zeros",
"numpy.random.rand"
],
[
"numpy.linalg.inv",
"numpy.eye",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
daaltces/pydaal-tutorials
|
[
"ed47e8eef30f63421968232713552c0afad5cfae"
] |
[
"2-pre-built-helper-classes/DecisionForest/DF-classification-usage-example.py"
] |
[
"import sys,os\r\nsys.path.append(os.path.join(os.path.dirname(sys.executable),'share','pydaal_examples','examples','python','source'))\r\nimport numpy as np\r\nfrom DecisionForest import Classification\r\nfrom daal.data_management import HomogenNumericTable\r\nfrom utils import printNumericTables,printNumericTable\r\nfrom sklearn.datasets import load_digits\r\nfrom sklearn.model_selection import train_test_split\r\nfrom daal.algorithms import decision_forest, classifier\r\n\r\n\r\n# Create train and test datasets\r\n#***Binary classifier***\r\nprint(\"**** Binary Classifier****\")\r\ndata = load_digits(n_class=2)\r\nx = data.data\r\ny = data.target\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.40, random_state=42)\r\ntrainData = HomogenNumericTable(x_train)\r\ntestData=HomogenNumericTable(x_test)\r\nnD_y_train= y_train[:,np.newaxis]\r\ntrainDependentVariables= HomogenNumericTable(nD_y_train)\r\nnD_y_test = y_test[:,np.newaxis]\r\ntestGroundTruth = HomogenNumericTable(nD_y_test)\r\n\r\n'''\r\nInstantiate Decision Forest object: Classification(nClasses, nTrees = 100, observationsPerTreeFraction = 1,featuresPerNode=0,maxTreeDepth=0,\r\n\t\t\t\t minObservationsInLeafNodes=1,impurityThreshold=0,varImportance='MDI')\r\n'''\r\ndaal_DF = Classification(len(np.unique(y)),observationsPerTreeFraction=0.7,varImportance='MDI',resultsToCompute=3)\r\n#Train\r\ntrainingResult = daal_DF.training(trainData,trainDependentVariables)\r\n#Predict\r\npredictResults = daal_DF.predict(trainingResult,testData)\r\n#Evaluate you model\r\nqualityMet = daal_DF.qualityMetrics(predictResults,testGroundTruth)\r\n#print accuracy\r\nprint(\"Accuracy\".format(qualityMet.get('accuracy')))\r\n#print confusion matrix\r\nprintNumericTable(qualityMet.get('confusionMatrix'),\"Confusion Matrix\")\r\n#print all metrics\r\nprint(\"All available metrics\")\r\ndaal_DF.printAllQualityMetrics(qualityMet)\r\n#Serialize and save\r\ndaal_DF.serialize(trainingResult, fileName='DF', useCompression=True)\r\n#Deserialize\r\ndese_trainingRes = daal_DF.deserialize(fileName='DF.npy', useCompression=True)\r\n\r\n#Print predicted responses and actual responses\r\nprintNumericTables (\r\n testGroundTruth, predictResults,\r\n \"Ground truth\", \"Classification results\",\r\n \"Decision Forest classification results (first 20 observations):\", 20, flt64=False\r\n )\r\n\r\n#*****Multi-classifier\r\nprint(\"**** Multi-Classifier****\")\r\ndata = load_digits()\r\nx = data.data\r\ny = data.target\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.40, random_state=42)\r\ntrainData = HomogenNumericTable(x_train)\r\ntestData=HomogenNumericTable(x_test)\r\nnD_y_train= y_train[:,np.newaxis]\r\ntrainDependentVariables= HomogenNumericTable(nD_y_train)\r\nnD_y_test = y_test[:,np.newaxis]\r\ntestGroundTruth = HomogenNumericTable(nD_y_test)\r\n\r\n'''\r\nInstantiate Decision Forest object Classification(nClasses, nTrees = 100, observationsPerTreeFraction = 1,featuresPerNode=0,maxTreeDepth=0,\r\n\t\t\t\t minObservationsInLeafNodes=1,impurityThreshold=0,varImportance='MDI')\r\n'''\r\ndaal_DF = Classification(len(np.unique(y)),observationsPerTreeFraction=0.7)\r\n#Train\r\ntrainingResult = daal_DF.training(trainData,trainDependentVariables)\r\n#Predict\r\npredictResults = daal_DF.predict(trainingResult,testData)\r\n#Evaluate you model\r\nqualityMet = daal_DF.qualityMetrics(predictResults,testGroundTruth)\r\n#print accuracy\r\nprint(\"Accuracy\".format(qualityMet.get('averageAccuracy')))\r\n#print confusion 
matrix\r\nprintNumericTable(qualityMet.get('confusionMatrix'),\"Confusion Matrix\")\r\n#print all metrics\r\nprint(\"All available metrics\")\r\ndaal_DF.printAllQualityMetrics(qualityMet)\r\n#Serialize and save\r\ndaal_DF.serialize(trainingResult, fileName='DF', useCompression=True)\r\n#Deserialize\r\ndese_trainingRes = daal_DF.deserialize(fileName='DF.npy', useCompression=True)\r\n\r\n#Print predicted responses and actual responses\r\nprintNumericTables (\r\n testGroundTruth, predictResults,\r\n \"Ground truth\", \"Classification results\",\r\n \"Decision Forest classification results (first 20 observations):\", 20, flt64=False\r\n )\r\n\r\n\r\n\r\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_digits",
"numpy.unique"
]
] |
yiqings/autogluon
|
[
"d41a1ab6a6c440b82f06a25835c719fb9d9e9ebf"
] |
[
"text/src/autogluon/text/automm/models/categorical_transformer.py"
] |
[
"import torch\nfrom torch import nn\nfrom torch import Tensor\nfrom typing import Any, Dict, List, Optional, Union, cast\nfrom ..constants import CATEGORICAL, LABEL, LOGITS, FEATURES\nfrom .ft_transformer import _TokenInitialization, CLSToken, FT_Transformer\n\n\nclass CategoricalFeatureTokenizer(nn.Module):\n \"\"\"\n Feature tokenizer for categorical features in tabular data. \n It transforms the input categorical features to tokens (embeddings).\n\n The categorical features usually refers to discrete features.\n \"\"\"\n\n def __init__(\n self,\n num_categories: List[int],\n d_token: int,\n bias: Optional[bool] = True,\n initialization: Optional[str] = 'normal',\n ) -> None:\n \"\"\"\n Parameters\n ----------\n num_categories: \n A list of integers. Each one is the number of categories in one categorical column.\n d_token: \n The size of one token.\n bias: \n If `True`, for each feature, an additional trainable vector will be added to the\n embedding regardless of feature value. Notablly, the bias are not shared between features.\n initialization: \n Initialization policy for parameters. Must be one of `['uniform', 'normal']`. \n\n References\n ----------\n Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, \n \"Revisiting Deep Learning Models for Tabular Data\", 2021\n https://arxiv.org/pdf/2106.11959.pdf\n \"\"\"\n super().__init__()\n \n self.num_categories = num_categories\n category_offsets = torch.tensor([0] + num_categories[:-1]).cumsum(0)\n\n self.register_buffer('category_offsets', category_offsets, persistent=False)\n self.embeddings = nn.Embedding(sum(num_categories), d_token)\n self.bias = nn.Parameter(Tensor(len(num_categories), d_token)) if bias else None\n initialization_ = _TokenInitialization.from_str(initialization)\n\n for parameter in [self.embeddings.weight, self.bias]:\n if parameter is not None:\n initialization_.apply(parameter, d_token)\n \n @property\n def n_tokens(self) -> int:\n \"\"\"The number of tokens.\"\"\"\n return len(self.num_categories)\n\n @property\n def d_token(self) -> int:\n \"\"\"The size of one token.\"\"\"\n return self.embeddings.embedding_dim\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.embeddings(x + self.category_offsets[None])\n\n if self.bias is not None:\n x = x + self.bias[None]\n\n return x\n\n\nclass CategoricalTransformer(nn.Module):\n \"\"\"\n FT-Transformer for categorical tabular features. 
\n The input dimension is automatically computed based on\n the number of categories in each categorical column.\n \"\"\"\n\n def __init__(\n self, \n prefix: str, \n num_categories: List[int],\n d_token: int,\n cls_token: Optional[bool] = False,\n out_features: Optional[int] = None,\n num_classes: Optional[int] = 0,\n token_bias: Optional[bool] = True,\n token_initialization: Optional[str] = 'normal',\n n_blocks: Optional[int] = 0,\n attention_n_heads: Optional[int] = 8,\n attention_initialization: Optional[str] = 'kaiming',\n attention_normalization: Optional[str] = 'layer_norm',\n attention_dropout: Optional[str] = 0.2, \n residual_dropout: Optional[str] = 0.0,\n ffn_activation: Optional[str] = 'reglu',\n ffn_normalization: Optional[str] = 'layer_norm',\n ffn_d_hidden: Optional[str] = 6,\n ffn_dropout: Optional[str] = 0.0,\n prenormalization: Optional[bool] = True,\n first_prenormalization: Optional[bool] = False,\n kv_compression_ratio: Optional[float] = None,\n kv_compression_sharing: Optional[str] = None,\n head_activation: Optional[str] = 'relu',\n head_normalization: Optional[str] = 'layer_norm',\n ) -> None :\n \"\"\"\n Parameters\n ----------\n prefix\n The model prefix.\n num_categories\n A list of integers. Each one is the number of categories in one categorical column.\n d_token\n The size of one token for `_CategoricalFeatureTokenizer`.\n cls_token\n If `True`, cls token will be added to the token embeddings.\n out_features\n Dimension of output features.\n num_classes\n Number of classes. 1 for a regression task.\n token_bias\n If `True`, for each feature, an additional trainable vector will be added in `_CategoricalFeatureTokenizer` \n to the embedding regardless of feature value. Notablly, the bias are not shared between features.\n token_initialization\n Initialization policy for parameters in `_CategoricalFeatureTokenizer` and `_CLSToke`. \n Must be one of `['uniform', 'normal']`. 
\n n_blocks\n Number of the `FT_Transformer` blocks, which should be non-negative.\n attention_n_heads\n Number of attention heads in each `FT_Transformer` block, which should be postive.\n attention_initialization\n Weights initalization scheme for Multi Headed Attention module.\n attention_dropout\n Dropout ratio for the Multi Headed Attention module.\n residual_dropout\n Dropout ratio for the linear layers in FT_Transformer block.\n ffn_activation\n Activation function type for the Feed-Forward Network module.\n ffn_normalization\n Normalization scheme of the Feed-Forward Network module.\n ffn_d_hidden\n Number of the hidden nodes of the linaer layers in the Feed-Forward Network module.\n ffn_dropout\n Dropout ratio of the hidden nodes of the linaer layers in the Feed-Forward Network module.\n prenormalization, first_prenormalization\n Prenormalization to stablize the training.\n kv_compression_ratio\n The compression ration to reduce the input sequence length.\n kv_compression_sharing\n If `true` the projections will share weights.\n head_activation\n Activation function type of the MLP layer.\n head_normalization\n Normalization scheme of the MLP layer.\n\n References\n ----------\n Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, \n \"Revisiting Deep Learning Models for Tabular Data\", 2021\n https://arxiv.org/pdf/2106.11959.pdf\n \"\"\"\n\n super().__init__()\n\n assert num_categories, 'num_categories must be non-empty'\n assert d_token > 0, 'd_token must be positive'\n assert n_blocks >= 0, 'n_blocks must be non-negative' \n assert attention_n_heads > 0, 'attention_n_heads must be postive'\n assert token_initialization in ['uniform', 'normal'], 'initialization must be uniform or normal'\n\n self.num_categories = num_categories\n\n self.prefix = prefix\n self.out_features = out_features\n\n\n self.categorical_feature_tokenizer = CategoricalFeatureTokenizer(\n num_categories=num_categories,\n d_token=d_token,\n bias=token_bias,\n initialization=token_initialization,\n ) \n\n self.cls_token = CLSToken(\n d_token=d_token, \n initialization=token_initialization,\n ) if cls_token else nn.Identity()\n\n if kv_compression_ratio is not None: \n n_tokens = self.categorical_feature_tokenizer.n_tokens + 1\n else:\n n_tokens = None\n\n self.transformer = FT_Transformer(\n d_token=d_token,\n n_blocks=n_blocks,\n attention_n_heads=attention_n_heads,\n attention_dropout=attention_dropout,\n attention_initialization=attention_initialization,\n attention_normalization=attention_normalization,\n ffn_d_hidden=ffn_d_hidden,\n ffn_dropout=ffn_dropout,\n ffn_activation=ffn_activation,\n ffn_normalization=ffn_normalization,\n residual_dropout=residual_dropout,\n prenormalization=prenormalization,\n first_prenormalization=first_prenormalization,\n last_layer_query_idx=None,\n n_tokens=n_tokens,\n kv_compression_ratio=kv_compression_ratio,\n kv_compression_sharing=kv_compression_sharing,\n head_activation=head_activation,\n head_normalization=head_normalization,\n d_out=out_features,\n )\n\n self.head = FT_Transformer.Head(\n d_in=d_token,\n d_out=num_classes,\n bias=True,\n activation=head_activation, \n normalization=head_normalization if prenormalization else 'Identity',\n )\n\n self.name_to_id = self.get_layer_ids()\n\n @property\n def categorical_key(self):\n return f\"{self.prefix}_{CATEGORICAL}\"\n\n @property\n def label_key(self):\n return f\"{self.prefix}_{LABEL}\"\n\n def forward(\n self, \n batch: dict\n ):\n \"\"\"\n\n Parameters\n ----------\n batch\n A dictionary containing 
the input mini-batch data.\n We need to use the keys with the model prefix to index required data.\n\n Returns\n -------\n A dictionary with logits and features.\n \"\"\"\n \n categorical_features = []\n for categorical_feature in batch[self.categorical_key]:\n categorical_features.append(categorical_feature)\n categorical_features = torch.stack(categorical_features,dim=1)\n\n features = self.categorical_feature_tokenizer(categorical_features)\n features = self.cls_token(features)\n features = self.transformer(features)\n \n logits = self.head(features)\n\n return {\n self.prefix: {\n LOGITS: logits,\n FEATURES: features,\n }\n }\n\n def get_layer_ids(self,):\n \"\"\"\n All layers have the same id 0 since there is no pre-trained models used here.\n\n Returns\n -------\n A dictionary mapping the layer names (keys) to their ids (values).\n \"\"\"\n name_to_id = {}\n for n, _ in self.named_parameters():\n name_to_id[n] = 0\n\n return name_to_id\n"
] |
[
[
"torch.stack",
"torch.nn.Identity",
"torch.tensor"
]
] |
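The entry above stores an FT-Transformer style model for categorical tabular features (Gorishniy et al., 2021). Its `CategoricalFeatureTokenizer`, `CLSToken`, and `FT_Transformer` classes live elsewhere in that repository, so the sketch below is a minimal stand-in rather than the stored implementation: it only shows how the per-column category counts in `num_categories` determine a shared embedding table and how a learnable CLS token is appended before the transformer blocks. All names here are hypothetical.

```python
# Minimal sketch (assumptions, not the stored implementation): embed a batch of
# categorical columns through one shared table indexed by per-column offsets,
# then append a learnable CLS token, mirroring the tokenizer -> cls_token -> transformer flow.
from typing import List

import torch
import torch.nn as nn


class ToyCategoricalTokenizer(nn.Module):
    def __init__(self, num_categories: List[int], d_token: int):
        super().__init__()
        # Offsets map each column's local category id into one flat embedding table.
        offsets = torch.tensor([0] + num_categories[:-1]).cumsum(0)
        self.register_buffer("offsets", offsets)
        self.embeddings = nn.Embedding(sum(num_categories), d_token)
        self.cls = nn.Parameter(torch.randn(d_token))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, n_columns) integer category ids, one column per categorical feature.
        tokens = self.embeddings(x + self.offsets)   # (batch, n_columns, d_token)
        cls = self.cls.expand(x.shape[0], 1, -1)     # (batch, 1, d_token)
        return torch.cat([tokens, cls], dim=1)       # (batch, n_columns + 1, d_token)


if __name__ == "__main__":
    tok = ToyCategoricalTokenizer(num_categories=[3, 5, 2], d_token=8)
    batch = torch.tensor([[0, 4, 1], [2, 0, 0]])
    print(tok(batch).shape)  # torch.Size([2, 4, 8])
```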
gilad-rubin/hypster
|
[
"e8a15b9023a1bbffad4e5cdd756aed33e2682931"
] |
[
"tests/test_preprocessing.py"
] |
[
"import sklearn\nimport scipy\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import make_classification\n\nfrom hypster import HyPSTERClassifier\n\nSEED = 50\n\ndef test_flow():\n X, y = make_classification(n_samples=300, n_features=40, n_informative=30, random_state=SEED)\n X = pd.DataFrame(X)\n\n # TODO add categorical\n n = X.shape[0]\n X['A'] = pd.Series(['alpha', 'beta', 'gamma'] * int(n / 4)).head(n)\n X['B'] = pd.Series(np.random.randint(0, 20, n)).astype(str)\n X['C'] = pd.Series(np.random.randint(0, 15, n)).astype(str)\n cat_cols = [\"A\", \"B\", \"C\"]\n # cat_cols = None\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=SEED)\n\n frameworks = [\"xgboost\", \"lightgbm\", \"sklearn\"]\n model_types = [\"linear\", \"tree_based\"]\n\n n_trials = 20\n\n pre_proc = []\n proc_dict = [(\"categorical\", [\"A\", \"B\"], [\"encode\", \"normalize\"]),\n (\"cat_other\", [\"C\"], [\"encode\"])]\n post_proc = [\"feature selection\", \"scale\"]\n\n clf = HyPSTERClassifier(frameworks=frameworks,\n model_types=model_types,\n pre_proc=pre_proc,\n proc_dict=proc_dict,\n post_proc=post_proc,\n scoring=\"roc_auc\",\n cv=3,\n max_iter=10,\n tol=1e-5,\n max_fails=0,\n n_jobs=-1,\n random_state=SEED)\n\n clf.fit(X_train, y_train, cat_cols=cat_cols, n_trials=n_trials)\n preds = clf.predict_proba(X_test)\n roc_score = sklearn.metrics.roc_auc_score(y_test, preds[:, 1])\n assert roc_score > 0.5"
] |
[
[
"sklearn.metrics.roc_auc_score",
"sklearn.datasets.make_classification",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.random.randint"
]
] |
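This test drives `HyPSTERClassifier` end to end; since that API is specific to the hypster package, the sketch below only mirrors the test's shape: the same kind of synthetic frame with mixed numeric and string columns (built to exactly `n` rows, since the stored `.head(n)` construction appears to leave the tail of column `A` as NaN) and the same better-than-chance ROC-AUC check, with a plain scikit-learn pipeline standing in for the tuned model. The one-hot plus logistic-regression baseline is an assumption for illustration only.

```python
# Sketch under stated assumptions: same data setup and sanity check as the stored test,
# with a generic sklearn pipeline in place of HyPSTERClassifier.
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

SEED = 50

X, y = make_classification(n_samples=300, n_features=40, n_informative=30, random_state=SEED)
X = pd.DataFrame(X)
n = X.shape[0]
X["A"] = (["alpha", "beta", "gamma"] * (n // 3 + 1))[:n]   # exactly n values, no NaN tail
X["B"] = pd.Series(np.random.randint(0, 20, n)).astype(str)
X["C"] = pd.Series(np.random.randint(0, 15, n)).astype(str)
cat_cols = ["A", "B", "C"]
X.columns = [str(c) for c in X.columns]   # keep all column labels as strings

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=SEED)

pre = ColumnTransformer(
    [("cat", OneHotEncoder(handle_unknown="ignore"), cat_cols)],
    remainder="passthrough",
)
clf = Pipeline([("pre", pre), ("model", LogisticRegression(max_iter=1000))])
clf.fit(X_train, y_train)
preds = clf.predict_proba(X_test)
assert roc_auc_score(y_test, preds[:, 1]) > 0.5   # same sanity check as the stored test
```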
212726320/BEBO-1
|
[
"2909b3a00161b2e29fad667add30392abc11a968"
] |
[
"src/systems/synthetic.py"
] |
[
"# File: synthetic.py\n# File Created: Sunday, 3rd November 2019 11:38:53 am\n# Author: Steven Atkinson ([email protected])\n\n\"\"\"\nAnother synthetic function\n\"\"\"\n\nimport numpy as np\n\nfrom .base import WithFunction\n\n\nclass Synthetic(WithFunction):\n \"\"\"\n Domain: [0, 1]\n \"\"\"\n def __init__(self):\n super().__init__()\n\n self.real_dimensions = 1\n self.general_dimensions = 1\n self.num_types = [1000]\n self._fidelity_matrix = None\n\n self._cache_fidelity_params()\n\n def _cache_fidelity_params(self):\n # Initialize and cache:\n rng_state = np.random.get_state()\n np.random.seed(42)\n self._fidelity_matrix = np.random.rand(1000, 2)\n # ...And resume previous state\n np.random.set_state(rng_state)\n\n def _call(self, x, i):\n a, b = self._fidelity_matrix[i[0]]\n\n # Original in terms of x_scaled...\n y = a + 4.0 * x - 4.0\n return 0.1 * y ** 4 - y ** 2 + (2.0 + b) * np.sin(2.0 * y)\n"
] |
[
[
"numpy.random.get_state",
"numpy.random.seed",
"numpy.sin",
"numpy.random.set_state",
"numpy.random.rand"
]
] |
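The entry above evaluates a multi-fidelity synthetic objective whose per-fidelity coefficients are drawn once from a seeded RNG while the caller's global NumPy random state is saved and restored. A standalone sketch of that pattern and of the stored objective (function names here are illustrative):

```python
# Sketch: cache fidelity-specific coefficients reproducibly without disturbing the
# caller's global NumPy RNG state, then evaluate
# f(x, i) = 0.1*y^4 - y^2 + (2 + b_i) * sin(2*y), with y = a_i + 4*x - 4 and x in [0, 1].
import numpy as np


def cache_fidelity_params(n_fidelities: int = 1000, seed: int = 42) -> np.ndarray:
    state = np.random.get_state()      # remember whatever the caller was doing
    np.random.seed(seed)
    params = np.random.rand(n_fidelities, 2)
    np.random.set_state(state)         # ...and hand the global RNG back untouched
    return params


def synthetic(x: np.ndarray, i: int, params: np.ndarray) -> np.ndarray:
    a, b = params[i]
    y = a + 4.0 * x - 4.0
    return 0.1 * y ** 4 - y ** 2 + (2.0 + b) * np.sin(2.0 * y)


if __name__ == "__main__":
    params = cache_fidelity_params()
    x = np.linspace(0.0, 1.0, 5)
    print(synthetic(x, i=0, params=params))
```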
yukiar/phrase_alignment_cted
|
[
"7706ca1b8be849f2413d813ea1f3c77919b635ee"
] |
[
"src/bertwrapper.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorch_pretrained_bert.modeling import BertModel\nfrom cnn import CNN\nfrom multihead_attention import MultiheadAttention\nfrom position_wise_ffnn import Position_wise_FFNN\n\nMAXPOOLING = 0\nMEANPOOLING = 1\n\n\"\"\"\nInitialize BERT\n\"\"\"\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n print(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.\")\n\n\n class BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\n\"\"\"\nTransfer Fine-tuned BERT Wrapper\n\"\"\"\n\n\nclass BERTWrapper(nn.Module):\n def __init__(self, config):\n super(BERTWrapper, self).__init__()\n if config['model_path'] in ['bert-base-uncased', 'bert-large-uncased']:\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n else:\n self.bert = BertModel.from_pretrained(config['bert_model'],\n state_dict=torch.load(config['model_path'])).cuda()\n\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.init_bert_weights(self.bert)\n self.bert.eval()\n\n def encode(self, features):\n # Compute BERT embedding\n bert_embedding = self._get_bert_embedding(features)\n return bert_embedding.cpu().numpy()\n\n def _get_bert_embedding(self, features):\n embeddings = []\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n example_indices = torch.arange(input_ids.size(0), dtype=torch.long)\n\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n for b, example_index in enumerate(example_indices):\n embeddings.append(all_encoder_layers[b])\n\n return torch.stack(embeddings)\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n\"\"\"\nModel 1-b BERT Wrapper\n\"\"\"\n\n\nclass BERT1BWrapper(nn.Module):\n def __init__(self, config):\n super(BERT1BWrapper, self).__init__()\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n self.ff = nn.Linear(self.bert_emb_dim * 2, self.bert_emb_dim)\n\n self.load_state_dict(torch.load(config['model_path']))\n self.init_bert_weights(self.bert)\n self.bert.eval()\n self.ff.eval()\n\n def 
encode_sentence(self, features):\n # Compute BERT embedding\n bert_embedding = self._get_bert_embedding(features)\n return bert_embedding\n\n def encode_phrase(self, sent_emb, sidx, n, m, pooling):\n if pooling == MAXPOOLING:\n s_phrase_emb = self._ff(sent_emb[sidx, 0, :],\n torch.max(sent_emb[sidx, n.start:n.end, :], dim=0)[0])\n t_phrase_emb = self._ff(sent_emb[sidx, 0, :],\n torch.max(sent_emb[sidx, m.start:m.end, :], dim=0)[0])\n else: # mean pooling\n s_phrase_emb = self._ff(sent_emb[sidx, 0, :],\n torch.mean(sent_emb[sidx, n.start:n.end, :], dim=0))\n t_phrase_emb = self._ff(sent_emb[sidx, 0, :],\n torch.mean(sent_emb[sidx, m.start:m.end, :], dim=0))\n\n return s_phrase_emb.cpu().numpy(), t_phrase_emb.cpu().numpy()\n\n def _ff(self, cls_emb, phrase_emb):\n input = torch.cat((cls_emb, phrase_emb), dim=0)\n with torch.no_grad():\n embed = self.ff(input)\n return embed\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n\"\"\"\nModel 1-c BERT Wrapper\n\"\"\"\n\n\nclass BERT1CWrapper(nn.Module):\n def __init__(self, config):\n super(BERT1CWrapper, self).__init__()\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n self.fc = nn.Linear(self.bert_emb_dim * 3, 1)\n self.activ = nn.Sigmoid()\n\n self.load_state_dict(torch.load(config['model_path']))\n self.init_bert_weights(self.bert)\n self.bert.eval()\n self.fc.eval()\n\n def encode_sentence(self, features):\n # Compute BERT embedding\n bert_embedding = self._get_bert_embedding(features)\n return bert_embedding\n\n def similarity(self, sent_emb, sidx, n, m, pooling):\n cls_emb = sent_emb[sidx, 0, :]\n if pooling == MAXPOOLING:\n s_phrase_emb = torch.max(sent_emb[sidx, n.start:n.end, :], dim=0)[0]\n t_phrase_emb = torch.max(sent_emb[sidx, m.start:m.end, :], dim=0)[0]\n else: # mean pooling\n s_phrase_emb = torch.mean(sent_emb[sidx, n.start:n.end, :], dim=0)\n t_phrase_emb = torch.mean(sent_emb[sidx, m.start:m.end, :], dim=0)\n\n with torch.no_grad():\n x = self.fc(torch.cat((cls_emb, s_phrase_emb, t_phrase_emb)))\n sim = self.activ(x)\n\n return sim.cpu().numpy()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = 
all_encoder_layers\n\n return all_encoder_layers\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n\"\"\"\nModel 1-d BERT Wrapper\n\"\"\"\nclass BERT1DWrapper(nn.Module):\n def __init__(self, config):\n super(BERT1DWrapper, self).__init__()\n self.epsilon = 1e-08\n\n # BERT\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n\n # CNN\n self.cnn = CNN(config['map_embed_size'])\n self.max_seq_len = config['max_seq_len']\n\n # FFNN\n self.fc = nn.Linear(self.bert_emb_dim * 3, 1)\n self.activ = nn.Sigmoid()\n\n self.load_state_dict(torch.load(config['model_path']))\n self.init_bert_weights(self.bert)\n self.bert.eval()\n self.cnn.eval()\n self.fc.eval()\n self.activ.eval()\n\n def encode_sentence(self, features):\n with torch.no_grad():\n # Compute BERT embedding\n sent_emb = self._get_bert_embedding(features)\n\n # Compute word alignment map\n maps = torch.zeros((len(features), 1, self.max_seq_len, self.max_seq_len)).cuda() # 1dim for channel\n for i in range(len(features)):\n s = torch.cat((sent_emb[i, 0:len(features[i].input_ids_a), :],\n torch.zeros(\n (self.max_seq_len - len(features[i].input_ids_a), self.bert_emb_dim)).cuda()))\n t = torch.cat((sent_emb[i,\n len(features[i].input_ids_a):len(features[i].input_ids_a) + len(features[i].input_ids_b),\n :],\n torch.zeros(\n (self.max_seq_len - len(features[i].input_ids_b), self.bert_emb_dim)).cuda()))\n maps[i, 0, :, :] = torch.tensordot(s, t.t(), dims=1) / torch.max(\n torch.tensordot(torch.norm(s, dim=1, keepdim=True), torch.norm(t, dim=1, keepdim=True).t(), dims=1),\n torch.full((self.max_seq_len, self.max_seq_len), self.epsilon).cuda())\n maps = self.cnn(maps)\n\n return sent_emb, maps\n\n def similarity(self, sent_emb, map_emb, sidx, n, m, pooling):\n cls_emb = map_emb[sidx, :]\n if pooling == MAXPOOLING:\n s_phrase_emb = torch.max(sent_emb[sidx, n.start:n.end, :], dim=0)[0]\n t_phrase_emb = torch.max(sent_emb[sidx, m.start:m.end, :], dim=0)[0]\n else: # mean pooling\n s_phrase_emb = torch.mean(sent_emb[sidx, n.start:n.end, :], dim=0)\n t_phrase_emb = torch.mean(sent_emb[sidx, m.start:m.end, :], dim=0)\n\n with torch.no_grad():\n x = self.fc(torch.cat((cls_emb, s_phrase_emb, t_phrase_emb)))\n predict = self.activ(x)\n\n return predict.cpu().numpy()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses 
truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n\"\"\"\nModel 1-e BERT Wrapper\n\"\"\"\n\n\nclass BERT1EWrapper(nn.Module):\n def __init__(self, config):\n super(BERT1EWrapper, self).__init__()\n # BERT\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n\n # Multihead attention\n self.num_heads = config['num_heads']\n self.atten = MultiheadAttention(self.bert_emb_dim, self.num_heads)\n self.layernorm1 = nn.LayerNorm(self.bert_emb_dim)\n self.layernorm2 = nn.LayerNorm(self.bert_emb_dim)\n\n # FFNN\n self.ffnn = Position_wise_FFNN(self.bert_emb_dim)\n\n self.load_state_dict(torch.load(config['model_path']))\n self.init_bert_weights(self.bert)\n self.atten.eval()\n self.layernorm1.eval()\n self.layernorm2.eval()\n self.bert.eval()\n self.ffnn.eval()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n def encode_sentence(self, features):\n # Compute BERT embedding\n sent_emb = self._get_bert_embedding(features)\n return sent_emb\n\n def similarity(self, sent_emb, sidx, n, m, pooling):\n cls_emb = sent_emb[sidx, 0, :].reshape((1, 1, self.bert_emb_dim))\n if pooling == MAXPOOLING:\n s_phrase_emb = torch.max(sent_emb[sidx, n.start:n.end, :], dim=0)[0].reshape((1, 1, self.bert_emb_dim))\n t_phrase_emb = torch.max(sent_emb[sidx, m.start:m.end, :], dim=0)[0].reshape((1, 1, self.bert_emb_dim))\n else: # mean pooling\n s_phrase_emb = torch.mean(sent_emb[sidx, n.start:n.end, :], dim=0).reshape((1, 1, self.bert_emb_dim))\n t_phrase_emb = torch.mean(sent_emb[sidx, m.start:m.end, :], dim=0).reshape((1, 1, self.bert_emb_dim))\n\n with torch.no_grad():\n mem = torch.cat((cls_emb, s_phrase_emb), dim=0)\n attn_output, attn_weights = self.atten(t_phrase_emb, mem, mem)\n atten = (t_phrase_emb + attn_output).squeeze(0)\n atten=self.layernorm1(atten)\n fc_atten = self.ffnn(atten)\n atten = atten + fc_atten\n atten=self.layernorm2(atten)\n\n return s_phrase_emb.squeeze().cpu().numpy(), atten.squeeze().cpu().numpy()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, 
std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n\"\"\"\nModel 1-f BERT Wrapper\n\"\"\"\nclass BERT1FWrapper(nn.Module):\n def __init__(self, config):\n super(BERT1FWrapper, self).__init__()\n self.epsilon = 1e-08\n\n # BERT\n self.bert_layer_indexes = config['bert_layers']\n self.bert_emb_dim = config['bert_emb_dim']\n self.bert = BertModel.from_pretrained(config['bert_model']).cuda()\n\n # CNN\n self.cnn = CNN(config['map_embed_size'])\n self.max_seq_len = config['max_seq_len']\n\n # Multihead attention\n self.num_heads = config['num_heads']\n self.atten = MultiheadAttention(self.bert_emb_dim, self.num_heads)\n self.layernorm1 = nn.LayerNorm(self.bert_emb_dim)\n self.layernorm2 = nn.LayerNorm(self.bert_emb_dim)\n\n # FFNN\n self.ffnn = Position_wise_FFNN(self.bert_emb_dim)\n\n self.load_state_dict(torch.load(config['model_path']))\n self.init_bert_weights(self.bert)\n self.cnn.eval()\n self.atten.eval()\n self.layernorm1.eval()\n self.layernorm2.eval()\n self.bert.eval()\n self.ffnn.eval()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n def encode_sentence(self, features):\n with torch.no_grad():\n # Compute BERT embedding\n sent_emb = self._get_bert_embedding(features)\n\n # Compute word alignment map\n maps = torch.zeros((len(features), 1, self.max_seq_len, self.max_seq_len)).cuda() # 1dim for channel\n for i in range(len(features)):\n s = torch.cat((sent_emb[i, 0:len(features[i].input_ids_a), :],\n torch.zeros(\n (self.max_seq_len - len(features[i].input_ids_a), self.bert_emb_dim)).cuda()))\n t = torch.cat((sent_emb[i,\n len(features[i].input_ids_a):len(features[i].input_ids_a) + len(features[i].input_ids_b),\n :],\n torch.zeros(\n (self.max_seq_len - len(features[i].input_ids_b), self.bert_emb_dim)).cuda()))\n maps[i, 0, :, :] = torch.tensordot(s, t.t(), dims=1) / torch.max(\n torch.tensordot(torch.norm(s, dim=1, keepdim=True), torch.norm(t, dim=1, keepdim=True).t(), dims=1),\n torch.full((self.max_seq_len, self.max_seq_len), self.epsilon).cuda())\n maps = self.cnn(maps)\n\n return sent_emb, maps\n\n def similarity(self, sent_emb, map_emb, sidx, n, m, pooling):\n cls_emb = map_emb[sidx, :].reshape((1, 1, self.bert_emb_dim))\n if pooling == MAXPOOLING:\n s_phrase_emb = torch.max(sent_emb[sidx, n.start:n.end, :], dim=0)[0].reshape((1, 1, self.bert_emb_dim))\n t_phrase_emb = torch.max(sent_emb[sidx, m.start:m.end, :], dim=0)[0].reshape((1, 1, self.bert_emb_dim))\n else: # mean pooling\n s_phrase_emb = torch.mean(sent_emb[sidx, n.start:n.end, :], dim=0).reshape((1, 1, self.bert_emb_dim))\n t_phrase_emb = torch.mean(sent_emb[sidx, m.start:m.end, :], dim=0).reshape((1, 1, self.bert_emb_dim))\n\n with torch.no_grad():\n mem = torch.cat((cls_emb, s_phrase_emb), dim=0)\n attn_output, attn_weights = self.atten(t_phrase_emb, mem, mem)\n atten = (t_phrase_emb + attn_output).squeeze(0)\n atten=self.layernorm1(atten)\n fc_atten = self.ffnn(atten)\n atten = atten + fc_atten\n atten=self.layernorm2(atten)\n\n return 
s_phrase_emb.squeeze().cpu().numpy(), atten.squeeze().cpu().numpy()\n\n def _get_bert_embedding(self, features):\n input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()\n with torch.no_grad():\n all_encoder_layers, _ = self.bert(input_ids, token_type_ids=None, attention_mask=input_mask,\n output_all_encoded_layers=False)\n all_encoder_layers = all_encoder_layers\n\n return all_encoder_layers\n\n \"\"\"\n BERT initializer\n \"\"\"\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n"
] |
[
[
"torch.mean",
"torch.norm",
"torch.ones",
"torch.max",
"torch.full",
"torch.cat",
"torch.load",
"torch.zeros",
"torch.sqrt",
"torch.nn.Sigmoid",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.tensor",
"torch.no_grad",
"torch.stack"
]
] |
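The wrappers above reuse two small building blocks: pooling a phrase vector out of a span of BERT token embeddings (max or mean), and, in the 1-d/1-f variants, a cosine word-alignment map whose denominator is kept away from zero for padded rows. A self-contained sketch of both on random tensors standing in for BERT output (shapes and helper names are assumptions):

```python
# Sketch with dummy tensors: span pooling as in encode_phrase/similarity, and the
# epsilon-guarded cosine alignment map built in the 1-d/1-f wrappers before the CNN.
import torch

def pool_span(sent_emb: torch.Tensor, sidx: int, start: int, end: int, max_pool: bool = True) -> torch.Tensor:
    span = sent_emb[sidx, start:end, :]                  # (span_len, dim)
    return span.max(dim=0)[0] if max_pool else span.mean(dim=0)

def alignment_map(s: torch.Tensor, t: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # Cosine similarity between every source and target token, with the denominator
    # clamped so padded (all-zero) rows do not divide by zero.
    dots = s @ t.t()                                     # (len_s, len_t)
    norms = torch.norm(s, dim=1, keepdim=True) @ torch.norm(t, dim=1, keepdim=True).t()
    return dots / torch.clamp(norms, min=eps)

if __name__ == "__main__":
    sent_emb = torch.randn(2, 16, 768)                   # (batch, seq_len, hidden)
    phrase = pool_span(sent_emb, sidx=0, start=3, end=7, max_pool=True)
    print(phrase.shape)                                  # torch.Size([768])
    print(alignment_map(sent_emb[0], sent_emb[1]).shape) # torch.Size([16, 16])
```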
kim0n0/rlberry
|
[
"6f0a68a0ea5ba7cd10c4787fbbdf1ed296d4810c"
] |
[
"rlberry/envs/bandits/corrupted_bandits.py"
] |
[
"import numpy as np\nfrom scipy import stats\n\nfrom rlberry.envs.bandits import Bandit\n\n\nclass CorruptedLaws:\n \"\"\"\n Class for corrupted laws.\n\n Parameters\n ----------\n\n law: law\n Can either be a frozen scipy law or any class that\n has a method .rvs() to sample according to the given law.\n\n cor_prop: float in (0,1/2)\n Proportion of corruption\n\n cor_law: law\n Laws of corruption.\n \"\"\"\n\n def __init__(self, law, cor_prop, cor_law):\n self.law = law\n self.cor_prop = cor_prop\n self.cor_law = cor_law\n\n def rvs(self, random_state):\n is_corrupted = random_state.binomial(1, self.cor_prop)\n if is_corrupted == 1:\n return self.cor_law.rvs(random_state=random_state)\n else:\n return self.law.rvs(random_state=random_state)\n\n def mean(self):\n return (\n 1 - self.cor_prop\n ) * self.law.mean() + self.cor_prop * self.cor_law.mean()\n\n\nclass CorruptedNormalBandit(Bandit):\n \"\"\"\n Class for Bandits corrupted by nature.\n\n Parameters\n ----------\n\n means: array-like of size n_arms, default=array([0,1])\n means of the law of inliers of each of the arms.\n\n stds: array-like of size n_arms or None, default=None\n stds of the law of inliers of each of the arms. If None, use array with\n all ones.\n\n cor_prop: float in (0,1/2), default=0.05\n proportion of corruption\n\n cor_laws: list of scipy frozen laws or None, default=None\n laws of corruption on each arm. If None, all the arms are corrupted by\n a normal of mean 1000 and std 1.\n \"\"\"\n\n def __init__(\n self,\n means=np.array([0, 1]),\n stds=None,\n cor_prop=0.05,\n cor_laws=None,\n ):\n laws = self.make_laws(means, stds, cor_prop, cor_laws)\n Bandit.__init__(self, laws=laws)\n\n def make_laws(self, means, stds, cor_prop, cor_laws):\n if cor_laws is not None:\n self.cor_laws = cor_laws\n else:\n self.cor_laws = [stats.norm(loc=1000) for a in range(len(means))]\n if stds is None:\n self.stds = np.ones(len(means))\n else:\n self.stds = stds\n assert len(means) == len(self.stds)\n assert cor_prop <= 0.5\n inlier_laws = [\n stats.norm(loc=means[a], scale=self.stds[a]) for a in range(len(means))\n ]\n return [\n CorruptedLaws(inlier_laws[a], cor_prop, self.cor_laws[a])\n for a in range(len(means))\n ]\n"
] |
[
[
"scipy.stats.norm",
"numpy.array"
]
] |
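The corrupted law above is a two-component mixture: with probability `cor_prop` the reward is drawn from the corruption law, otherwise from the inlier law, so its mean is `(1 - cor_prop) * inlier_mean + cor_prop * corruption_mean`. A minimal sketch that samples such a mixture with frozen scipy laws and checks that mean empirically (the concrete laws below are made up):

```python
# Sketch: sample from a corrupted reward law and compare the empirical mean with
# (1 - cor_prop) * inlier_mean + cor_prop * corruption_mean.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
inlier = stats.norm(loc=1.0, scale=1.0)
corruption = stats.norm(loc=1000.0, scale=1.0)
cor_prop = 0.05

def draw(random_state):
    if random_state.binomial(1, cor_prop) == 1:
        return corruption.rvs(random_state=random_state)
    return inlier.rvs(random_state=random_state)

samples = np.array([draw(rng) for _ in range(20000)])
expected = (1 - cor_prop) * inlier.mean() + cor_prop * corruption.mean()
print(samples.mean(), expected)   # both should be close to 50.95
```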
cavalleria/humanseg.pytorch
|
[
"77663504b32d777ae22345e660f8bc2132c1c817"
] |
[
"models/pspnet.py"
] |
[
"#------------------------------------------------------------------------------\n# Libraries\n#------------------------------------------------------------------------------\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\nfrom base.base_model import BaseModel\nfrom .backbones import resnet\n\n\nclass ConvBlock(nn.Module):\n\tdef __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):\n\t\tsuper(ConvBlock, self).__init__()\n\t\tself.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)\n\t\tself.bn = nn.BatchNorm2d(out_channels)\n\n\tdef forward(self, input):\n\t\tx = self.conv(input)\n\t\tx = self.bn(x)\n\t\tx = F.relu(x, inplace=True)\n\t\treturn x\n\nclass PyramidPoolingModule(nn.Module):\n\tdef __init__(self, in_channels, pyramids=[1,2,3,6]):\n\t\tsuper(PyramidPoolingModule, self).__init__()\n\t\tself.pyramids = pyramids\n\t\tout_channels = in_channels // len(pyramids)\n\n\t\tself.convs = nn.ModuleList()\n\t\tfor _ in pyramids:\n\t\t\tconv = ConvBlock(in_channels, out_channels, kernel_size=1, bias=False)\n\t\t\tself.convs.append(conv)\n\n\tdef forward(self, input):\n\t\tfeat = [input]\n\t\theight, width = input.shape[2:]\n\t\tfor i, bin_size in enumerate(self.pyramids):\n\t\t\tx = F.adaptive_avg_pool2d(input, output_size=bin_size)\n\t\t\tx = self.convs[i](x)\n\t\t\tx = F.interpolate(x, size=(height, width), mode='bilinear', align_corners=True)\n\t\t\tfeat.append(x)\n\t\tx = torch.cat(feat, dim=1)\n\t\treturn x\n\n\nclass PSPNet(BaseModel):\n\tdropout = 0.1\n\tpyramids = [1, 2, 3, 6]\n\tbackbone_os = 8\n\n\tdef __init__(self, backbone='resnet18', num_classes=2, pretrained_backbone=None):\n\t\tsuper(PSPNet, self).__init__()\n\t\tif 'resnet' in backbone:\n\t\t\tif backbone=='resnet18':\n\t\t\t\tn_layers = 18\n\t\t\t\tstage5_channels = 512\n\t\t\telif backbone=='resnet34':\n\t\t\t\tn_layers = 34\n\t\t\t\tstage5_channels = 512\n\t\t\telif backbone=='resnet50':\n\t\t\t\tn_layers = 50\n\t\t\t\tstage5_channels = 2048\n\t\t\telif backbone=='resnet101':\n\t\t\t\tn_layers = 101\n\t\t\t\tstage5_channels = 2048\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\n\t\t\tself.run_backbone = self._run_backbone_resnet\n\t\t\tself.backbone = resnet.get_resnet(n_layers, output_stride=self.backbone_os, num_classes=None)\n\t\t\tself.pyramid = PyramidPoolingModule(in_channels=stage5_channels, pyramids=self.pyramids)\n\t\t\tself.main_output = nn.Sequential(OrderedDict([\n\t\t\t\t(\"conv1\", ConvBlock(2*stage5_channels, stage5_channels//4, kernel_size=3, padding=1, bias=False)),\n\t\t\t\t(\"dropout\", nn.Dropout2d(p=self.dropout)),\n\t\t\t\t(\"conv2\", nn.Conv2d(stage5_channels//4, num_classes, kernel_size=1, bias=False)),\n\t\t\t]))\n\t\t\tself.aux_output = nn.Sequential(OrderedDict([\n\t\t\t\t(\"conv1\", ConvBlock(stage5_channels//2, stage5_channels//8, kernel_size=3, padding=1, bias=False)),\n\t\t\t\t(\"dropout\", nn.Dropout2d(p=self.dropout)),\n\t\t\t\t(\"conv2\", nn.Conv2d(stage5_channels//8, num_classes, kernel_size=1, bias=False)),\n\t\t\t]))\n\t\telse:\n\t\t\traise NotImplementedError\n\n\t\tself.init_weights()\n\t\tif pretrained_backbone is not None:\n\t\t\tself.backbone.load_pretrained_model(pretrained_backbone)\n\n\n\tdef forward(self, input):\n\t\tinp_shape = input.shape[2:]\n\t\tif self.training:\n\t\t\tfeat_stage5, feat_stage4 = self.run_backbone(input)\n\t\t\tfeat_pyramid = self.pyramid(feat_stage5)\n\t\t\tmain_out = 
self.main_output(feat_pyramid)\n\t\t\tmain_out = F.interpolate(main_out, size=inp_shape, mode='bilinear', align_corners=True)\n\t\t\taux_out = self.aux_output(feat_stage4)\n\t\t\treturn main_out, aux_out\n\t\telse:\n\t\t\tfeat_stage5 = self.run_backbone(input)\n\t\t\tfeat_pyramid = self.pyramid(feat_stage5)\n\t\t\tmain_out = self.main_output(feat_pyramid)\n\t\t\tmain_out = F.interpolate(main_out, size=inp_shape, mode='bilinear', align_corners=True)\n\t\t\treturn main_out\n\n\n\tdef _run_backbone_resnet(self, input):\n\t\t# Stage1\n\t\tx1 = self.backbone.conv1(input)\n\t\tx1 = self.backbone.bn1(x1)\n\t\tx1 = self.backbone.relu(x1)\n\t\t# Stage2\n\t\tx2 = self.backbone.maxpool(x1)\n\t\tx2 = self.backbone.layer1(x2)\n\t\t# Stage3\n\t\tx3 = self.backbone.layer2(x2)\n\t\t# Stage4\n\t\tx4 = self.backbone.layer3(x3)\n\t\t# Stage5\n\t\tx5 = self.backbone.layer4(x4)\n\t\t# Output\n\t\tif self.training:\n\t\t\treturn x5, x4\n\t\telse:\n\t\t\treturn x5"
] |
[
[
"torch.nn.Dropout2d",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d"
]
] |
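The pyramid pooling module above follows the usual PSPNet recipe: adaptively average-pool the stage-5 feature map to a few bin sizes, reduce channels with a 1x1 convolution, upsample back bilinearly, and concatenate with the input. A stripped-down functional sketch of that flow (BatchNorm and ReLU omitted for brevity; shapes are illustrative):

```python
# Functional sketch of the pyramid pooling flow: pool -> 1x1 conv -> upsample -> concat.
import torch
import torch.nn as nn
import torch.nn.functional as F

def pyramid_pool(x: torch.Tensor, convs: nn.ModuleList, pyramids=(1, 2, 3, 6)) -> torch.Tensor:
    feats = [x]
    h, w = x.shape[2:]
    for conv, bin_size in zip(convs, pyramids):
        p = F.adaptive_avg_pool2d(x, output_size=bin_size)            # (N, C, bin, bin)
        p = conv(p)                                                   # channel reduction
        p = F.interpolate(p, size=(h, w), mode="bilinear", align_corners=True)
        feats.append(p)
    return torch.cat(feats, dim=1)                                    # (N, C + 4*(C//4), H, W)

if __name__ == "__main__":
    in_channels, pyramids = 512, (1, 2, 3, 6)
    convs = nn.ModuleList(
        [nn.Conv2d(in_channels, in_channels // len(pyramids), kernel_size=1, bias=False)
         for _ in pyramids]
    )
    out = pyramid_pool(torch.randn(1, in_channels, 32, 32), convs, pyramids)
    print(out.shape)    # torch.Size([1, 1024, 32, 32])
```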
arita37/models
|
[
"659895bff24589829b317f5180d60329906dc13c"
] |
[
"models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py"
] |
[
"# Copyright 2018 Google. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"COCO-style evaluation metrics.\n\nForked from reference model implementation.\n\nCOCO API: github.com/cocodataset/cocoapi/\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport tempfile\n\nfrom absl import flags\n\nimport numpy as np\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nimport six\n\nimport tensorflow as tf\n\nimport ssd_constants\n\nFLAGS = flags.FLAGS\n\n\n# https://github.com/cocodataset/cocoapi/issues/49\nif six.PY3:\n import pycocotools.coco\n pycocotools.coco.unicode = str\n\n\ndef async_eval_runner(queue_predictions, queue_results, val_json_file):\n \"\"\"Load intermediate eval results and get COCO metrics.\"\"\"\n while True:\n message = queue_predictions.get()\n if message == 'STOP': # poison pill\n break\n step, predictions = message\n results = compute_map(predictions, val_json_file)\n queue_results.put((step, results))\n\n\ndef compute_map(predictions, val_json_file):\n \"\"\"Use model predictions to compute mAP.\n\n Args:\n predictions: a list of tuples returned by decoded_predictions function,\n each containing the following elements:\n image source_id, box coordinates in XYWH order, probability score, label\n val_json_file: path to COCO annotation file\n Returns:\n A dictionary that maps all COCO metrics (keys) to their values\n \"\"\"\n\n if val_json_file.startswith(\"gs://\"):\n _, local_val_json = tempfile.mkstemp(suffix=\".json\")\n tf.gfile.Remove(local_val_json)\n\n tf.gfile.Copy(val_json_file, local_val_json)\n atexit.register(tf.gfile.Remove, local_val_json)\n else:\n local_val_json = val_json_file\n\n cocoGt = COCO(local_val_json)\n cocoDt = cocoGt.loadRes(np.array(predictions))\n E = COCOeval(cocoGt, cocoDt, iouType='bbox')\n E.evaluate()\n E.accumulate()\n E.summarize()\n print(\"Current AP: {:.5f}\".format(E.stats[0]))\n metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',\n 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']\n\n # Prefix with \"COCO\" to group in TensorBoard.\n return {\"COCO/\" + key: value for key, value in zip(metric_names, E.stats)}\n\n\ndef calc_iou(target, candidates):\n target_tiled = np.tile(target[np.newaxis, :], (candidates.shape[0], 1))\n # Left Top & Right Bottom\n lt = np.maximum(target_tiled[:,:2], candidates[:,:2])\n\n rb = np.minimum(target_tiled[:,2:], candidates[:,2:])\n\n delta = np.maximum(rb - lt, 0)\n\n intersect = delta[:,0] * delta[:,1]\n\n delta1 = target_tiled[:,2:] - candidates[:,:2]\n area1 = delta1[:,0] * delta1[:,1]\n delta2 = target_tiled[:,2:] - candidates[:,:2]\n area2 = delta2[:,0] * delta2[:,1]\n\n iou = intersect/(area1 + area2 - intersect)\n return iou\n\n\n# TODO(haoyuzhang): Rewrite this NumPy based implementation to TensorFlow based\n# implementation under ssd_model.py 
accuracy_function.\ndef decode_predictions(labels_and_predictions):\n \"\"\"Decode predictions and remove unused boxes and labels.\"\"\"\n predictions = []\n for example in labels_and_predictions:\n source_id = int(example[ssd_constants.SOURCE_ID])\n pred_box = example[ssd_constants.PRED_BOXES]\n pred_scores = example[ssd_constants.PRED_SCORES]\n\n locs, labels, probs = decode_single(\n pred_box, pred_scores, ssd_constants.OVERLAP_CRITERIA,\n ssd_constants.MAX_NUM_EVAL_BOXES, ssd_constants.MAX_NUM_EVAL_BOXES)\n\n raw_height, raw_width, _ = example[ssd_constants.RAW_SHAPE]\n for loc, label, prob in zip(locs, labels, probs):\n # Ordering convention differs, hence [1], [0] rather than [0], [1]\n x, y = loc[1] * raw_width, loc[0] * raw_height\n w, h = (loc[3] - loc[1]) * raw_width, (loc[2] - loc[0]) * raw_height\n predictions.append(\n [source_id, x, y, w, h, prob, ssd_constants.CLASS_INV_MAP[label]])\n return predictions\n\n\ndef decode_single(bboxes_in, scores_in, criteria, max_output, max_num=200):\n # Reference to https://github.com/amdegroot/ssd.pytorch\n\n bboxes_out = []\n scores_out = []\n labels_out = []\n\n for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)):\n score = np.squeeze(score, 1)\n\n # skip background\n if i == 0:\n continue\n\n mask = score > ssd_constants.MIN_SCORE\n if not np.any(mask):\n continue\n\n bboxes, score = bboxes_in[mask, :], score[mask]\n\n score_idx_sorted = np.argsort(score)\n score_sorted = score[score_idx_sorted]\n\n score_idx_sorted = score_idx_sorted[-max_num:]\n candidates = []\n\n # perform non-maximum suppression\n while len(score_idx_sorted):\n idx = score_idx_sorted[-1]\n bboxes_sorted = bboxes[score_idx_sorted, :]\n bboxes_idx = bboxes[idx, :]\n iou = calc_iou(bboxes_idx, bboxes_sorted)\n\n score_idx_sorted = score_idx_sorted[iou < criteria]\n candidates.append(idx)\n\n bboxes_out.append(bboxes[candidates, :])\n scores_out.append(score[candidates])\n labels_out.extend([i]*len(candidates))\n\n if len(scores_out) == 0:\n tf.logging.info(\"No objects detected. Returning dummy values.\")\n return (\n np.zeros(shape=(1, 4), dtype=np.float32),\n np.zeros(shape=(1,), dtype=np.int32),\n np.ones(shape=(1,), dtype=np.float32) * ssd_constants.DUMMY_SCORE,\n )\n\n bboxes_out = np.concatenate(bboxes_out, axis=0)\n scores_out = np.concatenate(scores_out, axis=0)\n labels_out = np.array(labels_out)\n\n max_ids = np.argsort(scores_out)[-max_output:]\n\n return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]\n"
] |
[
[
"numpy.split",
"numpy.minimum",
"numpy.maximum",
"tensorflow.gfile.Copy",
"numpy.squeeze",
"numpy.tile",
"numpy.ones",
"numpy.concatenate",
"tensorflow.logging.info",
"tensorflow.gfile.Remove",
"numpy.any",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
]
] |
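One detail worth flagging in the stored `calc_iou`: `area1` and `area2` are computed from the same expression, `target_tiled[:,2:] - candidates[:,:2]`, which does not match the usual per-box area terms in the IoU denominator. The sketch below is a conventional reference for one target box against N candidates in corner format; it reflects an assumption about the intended formula, not the stored code.

```python
# Reference IoU (an assumption about intent): boxes are (x1, y1, x2, y2);
# union = area(target) + area(candidate) - intersection.
import numpy as np

def calc_iou_reference(target: np.ndarray, candidates: np.ndarray) -> np.ndarray:
    lt = np.maximum(target[:2], candidates[:, :2])     # intersection left-top
    rb = np.minimum(target[2:], candidates[:, 2:])     # intersection right-bottom
    wh = np.maximum(rb - lt, 0)
    intersect = wh[:, 0] * wh[:, 1]

    area_target = np.prod(target[2:] - target[:2])
    area_cand = np.prod(candidates[:, 2:] - candidates[:, :2], axis=1)
    return intersect / (area_target + area_cand - intersect)

if __name__ == "__main__":
    target = np.array([0.0, 0.0, 2.0, 2.0])
    candidates = np.array([[0.0, 0.0, 2.0, 2.0],    # identical box   -> IoU 1.0
                           [1.0, 1.0, 3.0, 3.0],    # quarter overlap -> IoU 1/7
                           [5.0, 5.0, 6.0, 6.0]])   # disjoint        -> IoU 0.0
    print(calc_iou_reference(target, candidates))
```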
CameronBeebe/GamestonkTerminal
|
[
"e235f09290fbc188566643e5a7be46298d33ac35",
"e235f09290fbc188566643e5a7be46298d33ac35"
] |
[
"gamestonk_terminal/etf/financedatabase_view.py",
"discordbot/stocks/technical_analysis/kc.py"
] |
[
"\"\"\"Finance Database view\"\"\"\n__docformat__ = \"numpy\"\n\nimport financedatabase as fd\nimport pandas as pd\nfrom tabulate import tabulate\nfrom gamestonk_terminal import feature_flags as gtff\n\n\ndef show_etfs(\n category: str,\n name: str,\n description: str,\n include_exchanges: bool,\n amount: int,\n options: str,\n):\n \"\"\"\n Display a selection of ETFs based on category, name and/or description filtered by total assets.\n Returns the top ETFs when no argument is given. [Source: Finance Database]\n\n Parameters\n ----------\n category: str\n Search by category to find ETFs matching the criteria.\n name: str\n Search by name to find ETFs matching the criteria.\n description: str\n Search by description to find ETFs matching the criteria.\n include_exchanges: bool\n When you wish to include different exchanges use this boolean.\n amount : int\n Number of ETFs to display, default is 10.\n options : str\n Show the category options.\n \"\"\"\n if options:\n for option in fd.show_options(\"etfs\"):\n print(option)\n return\n\n if category is not None:\n data = fd.select_etfs(\n category=\" \".join(category).title(), exclude_exchanges=include_exchanges\n )\n else:\n data = fd.select_etfs(category=category, exclude_exchanges=include_exchanges)\n\n if name is not None:\n print(name)\n data = fd.search_products(data, query=\" \".join(name), search=\"long_name\")\n if description is not None:\n data = fd.search_products(data, query=\" \".join(description), search=\"summary\")\n\n tabulate_data = pd.DataFrame(data).T[\n [\"long_name\", \"family\", \"category\", \"total_assets\"]\n ]\n tabulate_data_sorted = tabulate_data.sort_values(by=\"total_assets\", ascending=False)\n tabulate_data_sorted[\"total_assets\"] = (\n tabulate_data_sorted[\"total_assets [M]\"] / 1e6\n )\n\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n tabulate_data_sorted.iloc[:amount],\n showindex=True,\n headers=[\"Name\", \"Family\", \"Category\", \"Total Assets [M]\"],\n floatfmt=\".2f\",\n tablefmt=\"fancy_grid\",\n ),\n \"\\n\",\n )\n else:\n print(tabulate_data_sorted.iloc[:amount].to_string(), \"\\n\")\n",
"import discord\nimport config_discordbot as cfg\nfrom discordbot import gst_imgur\nfrom datetime import datetime, timedelta\nfrom matplotlib import pyplot as plt\nimport os\nimport helpers\n\nfrom gamestonk_terminal.helper_funcs import plot_autoscale\nfrom gamestonk_terminal.common.technical_analysis import volatility_model\nfrom gamestonk_terminal.config_plot import PLOT_DPI\n\n\nasync def kc_command(\n ctx, ticker=\"\", length=\"20\", scalar=\"2\", mamode=\"sma\", offset=\"0\", start=\"\", end=\"\"\n):\n \"\"\"Displays chart with keltner channel [Yahoo Finance]\"\"\"\n\n try:\n\n # Debug\n if cfg.DEBUG:\n print(\n f\"!stocks.ta.kc {ticker} {length} {scalar} {mamode} {offset} {start} {end}\"\n )\n\n # Check for argument\n possible_ma = [\"sma\", \"ema\", \"wma\", \"hma\", \"zlma\"]\n\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n if start == \"\":\n start = datetime.now() - timedelta(days=365)\n else:\n start = datetime.strptime(start, cfg.DATE_FORMAT)\n\n if end == \"\":\n end = datetime.now()\n else:\n end = datetime.strptime(end, cfg.DATE_FORMAT)\n\n if not length.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n length = float(length)\n if not scalar.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n scalar = float(scalar)\n if not offset.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n offset = float(offset)\n\n if mamode not in possible_ma:\n raise Exception(\"Invalid ma entered\")\n\n ticker = ticker.upper()\n df_stock = helpers.load(ticker, start)\n if df_stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve Data\n df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]\n\n df_ta = volatility_model.kc(\"1440min\", df_stock, length, scalar, mamode, offset)\n\n # Output Data\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n ax.plot(df_stock.index, df_stock[\"Adj Close\"].values, color=\"fuchsia\")\n ax.plot(df_ta.index, df_ta.iloc[:, 0].values, \"b\", lw=1.5, label=\"upper\")\n ax.plot(df_ta.index, df_ta.iloc[:, 1].values, \"b\", lw=1.5, ls=\"--\")\n ax.plot(df_ta.index, df_ta.iloc[:, 2].values, \"b\", lw=1.5, label=\"lower\")\n ax.set_title(f\"{ticker} Keltner Channels\")\n ax.set_xlim(df_stock.index[0], df_stock.index[-1])\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(\"Price\")\n\n ax.legend([ticker, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])\n ax.fill_between(\n df_ta.index,\n df_ta.iloc[:, 0].values,\n df_ta.iloc[:, 2].values,\n alpha=0.1,\n color=\"b\",\n )\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n\n plt.gcf().autofmt_xdate()\n fig.tight_layout(pad=1)\n\n plt.legend()\n\n plt.savefig(\"ta_kc.png\")\n uploaded_image = gst_imgur.upload_image(\"ta_kc.png\", title=\"something\")\n image_link = uploaded_image.link\n if cfg.DEBUG:\n print(f\"Image URL: {image_link}\")\n title = \"Stocks: Keltner-Channel \" + ticker\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embed.set_image(url=image_link)\n os.remove(\"ta_kc.png\")\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=\"ERROR Stocks: Keltner-Channel\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n"
] |
[
[
"pandas.DataFrame"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf"
]
] |
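In the first file above, the display step sorts by `total_assets` and then divides `tabulate_data_sorted["total_assets [M]"]` by 1e6, but no `total_assets [M]` column exists at that point; dividing the existing `total_assets` column is presumably what was intended. A small pandas sketch of that intended step (the frame below is invented for illustration):

```python
# Sketch of the intended post-processing: sort ETFs by total assets and express the
# column in millions for display. The data here is made up for illustration.
import pandas as pd

data = pd.DataFrame(
    {
        "long_name": ["ETF Alpha", "ETF Beta", "ETF Gamma"],
        "family": ["Fam A", "Fam B", "Fam C"],
        "category": ["Equity", "Bond", "Equity"],
        "total_assets": [2_500_000_000, 750_000_000, 12_000_000_000],
    }
)

shown = data.sort_values(by="total_assets", ascending=False).copy()
shown["total_assets"] = shown["total_assets"] / 1e6   # now in millions, matching the "[M]" header
print(shown.head(10).to_string(index=False))
```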
xypan1232/ToxDL
|
[
"751cf1cd1352f4b300d68e520b8fdf9edc768c19"
] |
[
"Evaluation.py"
] |
[
"__author__ = 'jasper.zuallaert, Xiaoyong.Pan'\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nfrom sklearn.metrics import f1_score, precision_recall_curve, matthews_corrcoef\n# Evaluates the predictions as written to the a file. The evaluation is done on a per_protein level, as specified in\n# the DeepGO publication (or in our documentation)\n# - file: the location of the file to be read. The file should have alternating lines of\n# a) predictions e.g. 0.243,0.234,0.431,0.013,0.833\n# b) labels e.g. 0,1,1,1,0,0\n# - classN: the class index if we want to evaluate for just 1 single term\ndef run_eval_per_protein(file, classN = None):\n import numpy as np\n mx = 0.0\n allLines = open(file).readlines()\n height = len(allLines)//2\n width = len(allLines[0].split(','))\n\n preds = np.zeros((height,width),dtype=np.float32)\n labels = np.zeros((height,width),dtype=np.int32)\n\n for h in range(height):\n l1 = allLines[h*2].split(',')\n l2 = allLines[h*2+1].split(',')\n for w in range(width):\n preds[h][w] = float(l1[w])\n labels[h][w] = int(l2[w])\n\n if classN != None:\n preds2 = np.zeros((height,1),dtype=np.float32)\n labels2 = np.zeros((height,1),dtype=np.int32)\n for h in range(height):\n preds2[h][0] = preds[h][classN]\n labels2[h][0] = labels[h][classN]\n preds = preds2\n labels = labels2\n\n precisions = []\n recalls = []\n fprs = []\n\n tp_final,fp_final,tn_final,fn_final = 0,0,0,0\n\n for t in range(0,1000):\n thr = t/1000\n preds_after_thr = (preds[:, :] > thr).astype(np.int32)\n\n tp = preds_after_thr * labels\n tp_per_sample = np.sum(tp,axis=1)\n\n fp_per_sample = np.sum(preds_after_thr * (np.ones_like(labels) - labels),axis=1)\n\n pos_predicted_for_sample = np.sum(preds_after_thr,axis=1)\n pos_labels_for_sample = np.sum(labels,axis=1)\n neg_labels_for_sample = len(labels[0]) - pos_labels_for_sample\n\n num_of_predicted_samples = np.sum((np.sum(preds_after_thr,axis=1)[:] > 0).astype(np.int32))\n pr_i_s = np.nan_to_num(tp_per_sample / pos_predicted_for_sample)\n se_i_s = np.nan_to_num(tp_per_sample / pos_labels_for_sample)\n fpr_i_s = np.nan_to_num(fp_per_sample / neg_labels_for_sample)\n pr = np.nan_to_num(np.sum(pr_i_s) / num_of_predicted_samples)\n\n if classN == None:\n se_divisor = height\n fpr_divisor = height\n else:\n se_divisor = np.sum(labels)\n fpr_divisor = np.sum(np.ones_like(labels)-labels)\n\n se = np.sum(se_i_s) / se_divisor\n fpr = np.nan_to_num(np.sum(fpr_i_s) / fpr_divisor)\n\n precisions.append(pr)\n recalls.append(se)\n fprs.append(fpr)\n\n f = 2 * pr * se / (pr + se)\n if f > mx:\n mx = f\n if classN != None and se > 0.50:\n tp_final = np.sum(tp)\n fp_final = np.sum(pos_predicted_for_sample) - tp_final\n fn_final = np.sum(pos_labels_for_sample) - tp_final\n tn_final = len(tp_per_sample) - tp_final - fn_final - fp_final\n\n auROC = auc(fprs,recalls)\n auPRC = auc(recalls,precisions)\n\n print('Total average per protein: {} {} F1={:1.7f} auROC={:1.7f} auPRC={:1.7f}'.format(file,classN,mx,auROC,auPRC))\n\n\n# Evaluates the predictions as written to the a file. The evaluation is done on a per_term level (see our documentation)\n# - file: the location of the file to be read. The file should have alternating lines of\n# a) predictions e.g. 0.243,0.234,0.431,0.013,0.833\n# b) labels e.g. 
0,1,1,1,0,0\ndef run_eval_per_term(file):\n import numpy as np\n\n allLines = open(file).readlines()\n height = len(allLines)//3\n width = len(allLines[0].split(','))\n\n preds = np.zeros((width,height),dtype=np.float32)\n labels = np.zeros((width,height),dtype=np.int32)\n\n for h in range(height):\n l1 = allLines[h*3].split(',')\n l2 = allLines[h*3+1].split(',')\n for w in range(width):\n preds[w][h] = float(l1[w])\n labels[w][h] = int(l2[w])\n\n all_auROC = []\n all_auPRC = []\n all_Fmax = []\n from math import isnan\n for termN in range(width):\n auROC, auPRC, Fmax, mcc = _calc_metrics_for_term(preds[termN],labels[termN])\n #auROC, auPRC, Fmax, tp,fn,tn,fp = _calc_metrics_for_term(sorted(zip(preds[termN],labels[termN])))\n #if not isnan(auROC) and not isnan(auROC):\n # all_auROC.append(auROC)\n # all_auPRC.append(auPRC)\n # all_Fmax.append(Fmax)\n print('auROC:', auROC, 'auPRC', auPRC, 'F1:', Fmax, 'MCC', mcc)\n return auROC, auPRC, Fmax, mcc \n #print(f'Term {termN: 3d}: auROC {auROC:1.4f}, auPRC {auPRC:1.4f}, Fmax {Fmax:1.4f} --- Example: TP {tp: 4d}, FP {fp: 4d}, TN {tn: 4d}, FN {fn: 4d}')\n #print(f'Total average per term: auROC {sum(all_auROC)/len(all_auROC)}, {sum(all_auPRC)/len(all_auPRC)}, {sum(all_Fmax)/len(all_Fmax)}')\n\ndef _calc_metrics_for_term(preds, test_label):\n auroc = roc_auc_score(test_label, preds)\n precision, recall, thresholds = precision_recall_curve(test_label, preds)\n auprc = auc(recall, precision)\n preds[preds>=0.5] = 1\n preds[preds<0.5] = 0\n f1score = f1_score(test_label, preds, average='binary')\n mcc = matthews_corrcoef(test_label, preds)\n return auroc, auprc, f1score, mcc\n\n# Calculate the metrics for a single term\n# - pred_and_lab_for_term: predictions and labels for this particular term, of format [(pred1, lab1), (pred2, lab2), ...]\ndef _calc_metrics_for_term1(pred_and_lab_for_term):\n pred_and_lab_for_term = pred_and_lab_for_term[::-1]\n total_pos = sum([x for _,x in pred_and_lab_for_term])\n tp,fp = 0,0\n fn = total_pos\n tn = len(pred_and_lab_for_term) - total_pos\n allSens, allPrec, allFPR = [],[],[]\n Fmax = 0\n\n tp_final, fn_final, tn_final, fp_final = -1, -1, -1, -1\n\n allSens.append(0.0)\n allPrec.append(0.0)\n allFPR.append(0.0)\n\n index = 0\n while index < len(pred_and_lab_for_term):\n last_with_this_probability = index < len(pred_and_lab_for_term) - 1 and pred_and_lab_for_term[index][0] != pred_and_lab_for_term[index+1][0]\n if pred_and_lab_for_term[index][1] == 1:\n tp += 1\n fn -= 1\n else: # 0\n fp += 1\n tn -= 1\n\n sens = tp / (tp + fn)\n prec = tp / (tp + fp)\n fpr = fp / (fp + tn)\n\n if sens > 0.5 and tp_final == -1:\n tp_final = tp\n tn_final = tn\n fp_final = fp\n fn_final = fn\n\n if last_with_this_probability:\n allSens.append(sens)\n allPrec.append(prec)\n f1 = 2 * sens * prec / (sens + prec)\n if f1 > Fmax:\n Fmax = f1\n allFPR.append(fpr)\n\n index += 1\n allSens.append(1.0)\n allPrec.append(total_pos / len(pred_and_lab_for_term))\n allFPR.append(1.0)\n\n auROC = auc(allFPR, allSens)\n auPRC = auc(allSens, allPrec)\n return auROC, auPRC, Fmax, tp_final,fn_final,tn_final,fp_final\n\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"numpy.ones_like",
"sklearn.metrics.matthews_corrcoef",
"numpy.nan_to_num",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.auc",
"sklearn.metrics.f1_score",
"numpy.zeros",
"numpy.sum"
]
] |
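The per-term evaluation above reduces to four scikit-learn calls: ROC AUC on the raw scores, area under the precision-recall curve via `auc(recall, precision)`, and F1/MCC after thresholding at 0.5. Note that `_calc_metrics_for_term` thresholds `preds` in place, so the caller's score array is modified; the sketch below computes the same metrics on a thresholded copy (the toy scores are made up):

```python
# Sketch of the per-term metrics, thresholding a copy so the caller's scores survive.
import numpy as np
from sklearn.metrics import auc, f1_score, matthews_corrcoef, precision_recall_curve, roc_auc_score

def term_metrics(scores: np.ndarray, labels: np.ndarray, threshold: float = 0.5):
    auroc = roc_auc_score(labels, scores)
    precision, recall, _ = precision_recall_curve(labels, scores)
    auprc = auc(recall, precision)
    hard = (scores >= threshold).astype(int)      # new array, not an in-place mutation
    return auroc, auprc, f1_score(labels, hard, average="binary"), matthews_corrcoef(labels, hard)

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    labels = rng.randint(0, 2, size=200)
    scores = np.clip(labels * 0.6 + rng.rand(200) * 0.5, 0, 1)   # noisy but informative scores
    print(term_metrics(scores, labels))
```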
bakwc/catboost
|
[
"28acd0d36dbdf2759890f54a775383912ccfb65c"
] |
[
"catboost/pytest/test.py"
] |
[
"from itertools import permutations\nimport yatest.common\nfrom yatest.common import ExecutionTimeoutError, ExecutionError\nimport pytest\nimport os\nimport filecmp\nimport numpy as np\nimport pandas as pd\nimport timeit\nimport json\n\nimport catboost\n\nfrom catboost_pytest_lib import (\n apply_catboost,\n compare_evals_with_precision,\n compare_fit_evals_with_precision,\n compare_evals,\n data_file,\n execute_catboost_fit,\n execute_dist_train,\n format_crossvalidation,\n generate_concatenated_random_labeled_dataset,\n get_limited_precision_dsv_diff_tool,\n local_canonical_file,\n permute_dataset_columns,\n remove_time_from_json,\n)\n\nCATBOOST_PATH = yatest.common.binary_path(\"catboost/app/catboost\")\n\nBOOSTING_TYPE = ['Ordered', 'Plain']\nGROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']\nBOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),\n ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]\n\nPREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']\n\nBINCLASS_LOSSES = ['Logloss', 'CrossEntropy']\nMULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']\nCLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES\nREGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'RMSEWithUncertainty', 'LogLinQuantile', 'Lq']\nPAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']\nGROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']\nRANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES\nALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES\n\nSAMPLING_UNIT_TYPES = ['Object', 'Group']\n\nOVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']\n\nLOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',\n 'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']\n\nLEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']\n\n# test both parallel in and non-parallel modes\n# default block size (5000000) is too big to run in parallel on these tests\nSCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']\nSCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']\n\nSEPARATOR_TYPES = [\n 'ByDelimiter',\n 'BySense',\n]\n\nTEXT_FEATURE_ESTIMATORS = [\n 'BoW',\n 'NaiveBayes',\n 'BM25',\n 'BoW,NaiveBayes',\n 'BoW,NaiveBayes,BM25'\n]\n\n\ndef diff_tool(threshold=None):\n return get_limited_precision_dsv_diff_tool(threshold, True)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_multiregression(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiRMSE',\n '-f', data_file('multiregression', 'train'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--cv-rand', '42',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n 
train='train',\n test='test',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression_single(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train_single.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('n_trees', [100, 500])\ndef test_multiregression(boosting_type, grow_policy, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\[email protected]('target_count', [1, 2, 3])\ndef test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):\n np.random.seed(42)\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n evals = []\n for perm in permutations(range(target_count)):\n inv_perm = range(target_count)\n for i, j in enumerate(perm):\n inv_perm[j] = i\n\n np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\\t')\n 
np.savetxt(test_file, np.hstack([y_test[:, perm], x_test]), delimiter='\\t')\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1)).reshape((-1, target_count))\n evals.append(eval[:, inv_perm])\n\n for eva in evals:\n assert np.allclose(eva, evals[0])\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [10, 100, 1000])\[email protected]('target_count', [1, 2, 3])\ndef test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):\n np.random.seed(42)\n ERR_PERC = 0.1\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\\t')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n\n for i in range(target_count):\n with open(get_cd_path(i), 'w') as cd:\n cd.write(''.join((('{}\\tTarget\\n'.format(j) if j == i else '{}\\tAuxiliary\\n'.format(j)) for j in range(target_count))))\n\n rmse_fit_cmd = (\n '--loss-function', 'RMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(i),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(i),\n '--eval-file', get_eval_path(i),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', rmse_fit_cmd)\n\n multirmse_eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1))\n rmse_eval = np.stack([\n np.loadtxt(get_eval_path(i), delimiter='\\t', skiprows=1, usecols=1)\n for i in range(target_count)\n ], axis=1)\n\n # cannot compare approxes because they are very different due to different boosting algorithms\n multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)\n rmse_loss = np.mean((rmse_eval - y_test)**2)\n\n assert rmse_loss.shape == multi_rmse_loss.shape\n assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', 
[100, 500])\ndef test_multiregression_single(boosting_type, n_trees):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    output_calc_path = yatest.common.test_output_path('test.calc')\n    output_metric_path = yatest.common.test_output_path('test.metric')\n\n    cmd_fit = (\n        '--loss-function', 'MultiRMSE',\n        '--boosting-type', boosting_type,\n        '-f', data_file('multiregression', 'train'),\n        '-t', data_file('multiregression', 'test'),\n        '--column-description', data_file('multiregression', 'train_single.cd'),\n        '-i', '{}'.format(n_trees),\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd_fit)\n\n    cmd_calc = (\n        CATBOOST_PATH,\n        'calc',\n        '--column-description', data_file('multiregression', 'train_single.cd'),\n        '-T', '4',\n        '-m', output_model_path,\n        '--input-path', data_file('multiregression', 'test'),\n        '-o', output_calc_path\n    )\n    yatest.common.execute(cmd_calc)\n\n    cmd_metric = (\n        CATBOOST_PATH,\n        'eval-metrics',\n        '--column-description', data_file('multiregression', 'train_single.cd'),\n        '-T', '4',\n        '-m', output_model_path,\n        '--input-path', data_file('multiregression', 'test'),\n        '-o', output_metric_path,\n        '--metrics', 'MultiRMSE'\n    )\n    yatest.common.execute(cmd_metric)\n    return [\n        local_canonical_file(output_eval_path),\n        local_canonical_file(output_calc_path),\n        local_canonical_file(output_metric_path)\n    ]\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\n@pytest.mark.parametrize(\n    'dev_score_calc_obj_block_size',\n    SCORE_CALC_OBJ_BLOCK_SIZES,\n    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    cmd = (\n        '--loss-function', 'QueryRMSE',\n        '-f', data_file('querywise', 'train'),\n        '-t', data_file('querywise', 'test'),\n        '--column-description', data_file('querywise', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n        '-i', '20',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n        '--grow-policy', grow_policy\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize(\n    'dev_score_calc_obj_block_size',\n    SCORE_CALC_OBJ_BLOCK_SIZES,\n    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):\n    newton_eval_path = yatest.common.test_output_path('newton.eval')\n    gradient_eval_path = yatest.common.test_output_path('gradient.eval')\n\n    def run_catboost(eval_path, leaf_estimation_method):\n        cmd = [\n            '--loss-function', 'QueryRMSE',\n            '-f', data_file('querywise', 'train'),\n            '-t', data_file('querywise', 'test'),\n            '--column-description', data_file('querywise', 'train.cd'),\n            '--boosting-type', boosting_type,\n            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n            '--leaf-estimation-method', leaf_estimation_method,\n            '-i', '20',\n            '-T', '4',\n            '--eval-file', eval_path,\n            '--use-best-model', 'false',\n        ]\n        execute_catboost_fit('CPU', cmd)\n\n    run_catboost(newton_eval_path, 'Newton')\n    run_catboost(gradient_eval_path, 'Gradient')\n    assert filecmp.cmp(newton_eval_path, gradient_eval_path)\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_pool_with_QueryId(boosting_type, grow_policy):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    cmd = (\n        '--loss-function', 'QueryRMSE',\n        '-f', data_file('querywise', 'train'),\n        '-t', data_file('querywise', 'test'),\n        '--column-description', data_file('querywise', 'train.cd.query_id'),\n        '--boosting-type', boosting_type,\n        '-i', '20',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n        '--grow-policy', grow_policy\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\n@pytest.mark.parametrize(\n    'dev_score_calc_obj_block_size',\n    SCORE_CALC_OBJ_BLOCK_SIZES,\n    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    cmd = (\n        '--loss-function', 'RMSE',\n        '-f', data_file('querywise', 'train'),\n        '-t', data_file('querywise', 'test'),\n        '--column-description', data_file('querywise', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n        '-i', '20',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n        '--grow-policy', grow_policy\n    )\n    execute_catboost_fit('CPU', cmd)\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\ndef test_averagegain(boosting_type):\n    learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n    test_error_path = yatest.common.test_output_path('test_error.tsv')\n    cmd = (\n        '--loss-function', 'QueryRMSE',\n        '-f', data_file('querywise', 'train'),\n        '-t', data_file('querywise', 'test'),\n        '--column-description', data_file('querywise', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '-i', '20',\n        '-T', '4',\n        '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',\n        '--learn-err-log', learn_error_path,\n        '--test-err-log', test_error_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\ndef test_queryaverage(boosting_type):\n    learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n    test_error_path = yatest.common.test_output_path('test_error.tsv')\n    cmd = (\n        '--loss-function', 'QueryRMSE',\n        '-f', data_file('querywise', 'train'),\n        '-t', data_file('querywise', 'test'),\n        '--column-description', data_file('querywise', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '-i', '20',\n        '-T', '4',\n        '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',\n        '--learn-err-log', learn_error_path,\n        '--test-err-log', test_error_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])\n@pytest.mark.parametrize('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])\ndef test_stochastic_filter(sigma, num_estimations):\n    model_path = 
yatest.common.test_output_path('model.bin')\n cd_path = yatest.common.test_output_path('pool.cd')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n\n prng = np.random.RandomState(seed=0)\n\n n_samples_by_query = 20\n n_features = 10\n n_queries = 50\n\n n_samples = n_samples_by_query * n_queries\n\n features = prng.uniform(0, 1, size=(n_samples, n_features))\n weights = prng.uniform(0, 1, size=n_features)\n\n labels = np.dot(features, weights)\n query_ids = np.arange(0, n_samples) // n_queries\n money = (n_queries - np.arange(0, n_samples) % n_queries) * 10\n\n labels = labels.reshape((n_samples, 1))\n query_ids = query_ids.reshape((n_samples, 1))\n money = money.reshape((n_samples, 1))\n\n features = np.hstack((labels, query_ids, money, features))\n\n n_learn = int(0.7 * n_samples)\n learn = features[:n_learn, :]\n test = features[n_learn:, :]\n np.savetxt(train_path, learn, fmt='%.5f', delimiter='\\t')\n np.savetxt(test_path, test, fmt='%.5f', delimiter='\\t')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')\n test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')\n loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations\n\n cmd = [\n '--loss-function', loss_description,\n '--leaf-estimation-backtracking', 'No',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-m', model_path,\n '--use-best-model', 'false',\n ]\n\n cmd_one_thread = cmd + [\n '--learn-err-log', learn_error_one_thread_path,\n '--test-err-log', test_error_one_thread_path,\n '-T', '1'\n ]\n\n cmd_four_thread = cmd + [\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '-T', '4'\n ]\n execute_catboost_fit('CPU', cmd_one_thread)\n execute_catboost_fit('CPU', cmd_four_thread)\n\n compare_evals(learn_error_one_thread_path, learn_error_path)\n compare_evals(test_error_one_thread_path, test_error_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('metric', ['DCG', 'NDCG'])\[email protected]('top', [-1, 1, 10])\[email protected]('dcg_type', ['Base', 'Exp'])\[email protected]('denominator', ['Position', 'LogPosition'])\ndef test_stochastic_rank(metric, top, dcg_type, denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(\n metric, top, dcg_type, denominator)\n\n cmd = (\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('top', [-1, 1, 10])\[email protected]('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = 
yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('top', [-1, 1, 10])\[email protected]('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound_with_many_ones(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n np.random.seed(0)\n train_with_ones = yatest.common.test_output_path('train_with_ones')\n TARGET_COLUMN = 2\n with open(data_file('querywise', 'train')) as fin:\n with open(train_with_ones, 'w') as fout:\n for line in fin.readlines():\n if np.random.random() < 0.25:\n parts = line.split('\\t')\n parts[TARGET_COLUMN] = '1.0'\n line = '\\t'.join(parts)\n fout.write(line)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', train_with_ones,\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('top', [2, 100])\ndef test_averagegain_with_query_weights(boosting_type, top):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('top_size', [2, 5, 10, -1])\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('cd_file', ['train.cd', 'train.cd.subgroup_id'])\ndef test_pfound(top_size, boosting_type, cd_file):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_params_ordering():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n learn_error_reversed_path = yatest.common.test_output_path('learn_error_reversed.tsv')\n test_error_path = 
yatest.common.test_output_path('ignored.tsv')\n\n def get_cmd(custom_metric, learn_error_path):\n return (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '--custom-metric', custom_metric,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', get_cmd(\"PFound:top=1;decay=0.6;hints=skip_train~false\", learn_error_path))\n execute_catboost_fit('CPU', get_cmd(\"PFound:decay=0.6;top=1;hints=skip_train~false\", learn_error_reversed_path))\n\n with open(learn_error_path) as f:\n assert 'PFound:top=1;decay=0.6' in f.read()\n with open(learn_error_reversed_path) as f:\n assert 'PFound:decay=0.6;top=1' in f.read()\n\n\ndef test_recall_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'RecallAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_precision_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'PrecisionAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mapk(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'MAP:top={}'.format(10),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ndcg_power_mode', ['Base', 'Exp'])\[email protected]('metric_type', ['DCG', 'NDCG'])\[email protected]('ndcg_denominator', ['None', 'LogPosition', 'Position'])\ndef test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n denominator = '' if ndcg_denominator == 'None' else 
';denominator={}'.format(ndcg_denominator)\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', '{}:top={};type={};hints=skip_train~false{}'.format(metric_type, 10, ndcg_power_mode, denominator),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_queryrmse_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairlogit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path, learn_pairs):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', learn_pairs),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path, 'train.pairs')\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 
'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation_with_max_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit:max_pairs=30',\n '--eval-metric', 'PairLogit:max_pairs=30',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--fstr-file', output_fstr_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path),\n local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_pairlogit_no_target(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_pairlogit_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogitPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n 
'--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_pairwise_reproducibility(loss_function):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '5',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank:permutations=5;decay=0.9',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_pairwise(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRankPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', 
data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ('YetiRank', 'YetiRankPairwise'))\ndef test_yetirank_default_metric(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--model-file', output_model_path,\n '--boosting-type', 'Plain',\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path)]\n\n\nNAN_MODE = ['Min', 'Max']\n\n\[email protected]('nan_mode', NAN_MODE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode(nan_mode, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult_nan', 'train_small'),\n '-t', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', nan_mode,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('border_count', [64, 255, 350, 1000, 2500])\ndef test_different_border_count(border_count):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '20',\n '-T', '4',\n '-x', str(border_count),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode_forbidden(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = 
(\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', 'Forbidden',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_iter(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_inc_to_dec(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-pval', '0.5',\n '--od-type', 'IncToDec',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)\ndef test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n snapshot_path = yatest.common.test_output_path('snapshot')\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--leaf-estimation-iterations', '10',\n '--max-ctr-complexity', '4',\n '--snapshot-file', snapshot_path,\n '--od-type', overfitting_detector_type\n )\n if overfitting_detector_type == 'IncToDec':\n cmd_prefix += (\n '--od-wait', '2',\n '--od-pval', '0.5'\n )\n elif overfitting_detector_type == 'Iter':\n cmd_prefix += ('--od-wait', '2')\n\n cmd_first = cmd_prefix + ('-i', '10')\n execute_catboost_fit('CPU', cmd_first)\n\n cmd_second = cmd_prefix + ('-i', '2000')\n execute_catboost_fit('CPU', cmd_second)\n\n return 
[local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\ndef test_per_object_approx_on_full_history(leaf_estimation_method):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '--approx-on-full-history',\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-w', '0.5',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '20',\n '--use-best-model', 'false')\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_shrink_model(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '1',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--use-best-model', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '2',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function 
for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'column_name',\n ['doc_id', 'sample_id'],\n ids=['column_name=doc_id', 'column_name=sample_id']\n)\ndef test_sample_id(loss_function, column_name):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n column_description = data_file('adult_' + column_name, 'train.cd')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('adult_doc_id', 'train'),\n '-t', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nPOOLS = ['amazon', 'adult']\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_apply_missing_vals(boosting_type, grow_policy):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('test_adult_missing_val.tsv'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_crossentropy(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'CrossEntropy',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_permutation_block(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n 
'--loss-function', 'Logloss',\n        '-f', data_file('adult', 'train_small'),\n        '-t', data_file('adult', 'test_small'),\n        '--column-description', data_file('adult', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n        '-i', '10',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--fold-permutation-block', '239',\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\ndef test_ignored_features(boosting_type):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n\n    cmd = (\n        '--loss-function', 'Logloss',\n        '-f', data_file('adult', 'train_small'),\n        '-t', data_file('adult', 'test_small'),\n        '--column-description', data_file('adult', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '-i', '10',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n        '-I', '0:1:3:5-7:10000',\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n    return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_names():\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n\n    cmd = (\n        '--loss-function', 'RMSE',\n        '--has-header',\n        '--learn-set', data_file('black_friday', 'train'),\n        '--test-set', data_file('black_friday', 'test'),\n        '--column-description', data_file('black_friday', 'cd'),\n        '-i', '10',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',\n    )\n    execute_catboost_fit('CPU', cmd)\n    return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_not_read():\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    input_cd_path = data_file('adult', 'train.cd')\n    cd_path = yatest.common.test_output_path('train.cd')\n\n    with open(input_cd_path, \"rt\") as f:\n        cd_lines = f.readlines()\n    with open(cd_path, \"wt\") as f:\n        for cd_line in cd_lines:\n            # Corrupt some features by making them 'Num' (split() returns a list)\n            if cd_line.split() == ['5', 'Categ']: # column 5 --> feature 4\n                cd_line = cd_line.replace('Categ', 'Num')\n            if cd_line.split() == ['7', 'Categ']: # column 7 --> feature 6\n                cd_line = cd_line.replace('Categ', 'Num')\n            f.write(cd_line)\n\n    cmd = (\n        '--loss-function', 'Logloss',\n        '-f', data_file('adult', 'train_small'),\n        '-t', data_file('adult', 'test_small'),\n        '--column-description', cd_path,\n        '-i', '10',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n        '-I', '4:6', # Ignore the corrupted features\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n    # Not needed: return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_not_read_names():\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    input_cd_path = data_file('black_friday', 'cd')\n    cd_path = yatest.common.test_output_path('cd')\n\n    with open(input_cd_path, \"rt\") as f:\n        cd_lines = f.readlines()\n    with open(cd_path, \"wt\") as f:\n        for cd_line in cd_lines:\n            # Corrupt some features by making them 'Num'\n            if cd_line.split() == ['2', 'Categ', 'Gender']:\n                cd_line = cd_line.replace('Categ', 'Num')\n            if cd_line.split() == ['10', 'Categ', 'Product_Category_3']:\n                cd_line = cd_line.replace('Categ', 'Num')\n            f.write(cd_line)\n\n    cmd = (\n        '--loss-function', 'RMSE',\n        '--has-header',\n        '--learn-set', data_file('black_friday', 'train'),\n        '--test-set', data_file('black_friday', 'test'),\n        '--column-description', cd_path,\n        '-i', '10',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '-I', 'Gender:Product_Category_3',\n    )\n    execute_catboost_fit('CPU', cmd)\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_baseline(boosting_type, grow_policy):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n\n    cmd = (\n        '--loss-function', 'Logloss',\n        '-f', data_file('adult_weight', 'train_weight'),\n        '-t', data_file('adult_weight', 'test_weight'),\n        '--column-description', data_file('train_adult_baseline.cd'),\n        '--boosting-type', boosting_type,\n        '--grow-policy', grow_policy,\n        '-i', '10',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n        '--use-best-model', 'false',\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n    calc_cmd = (\n        CATBOOST_PATH,\n        'calc',\n        '--input-path', data_file('adult_weight', 'test_weight'),\n        '--column-description', data_file('train_adult_baseline.cd'),\n        '-m', output_model_path,\n        '--output-path', formula_predict_path,\n        '--prediction-type', 'RawFormulaVal'\n    )\n    yatest.common.execute(calc_cmd)\n    assert(compare_evals(output_eval_path, formula_predict_path))\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline(boosting_type, loss_function):\n    labels = ['0', '1', '2', '3']\n\n    model_path = yatest.common.test_output_path('model.bin')\n\n    cd_path = yatest.common.test_output_path('cd.txt')\n    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n    prng = np.random.RandomState(seed=0)\n\n    train_path = yatest.common.test_output_path('train.txt')\n    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n    test_path = yatest.common.test_output_path('test.txt')\n    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n    eval_path = yatest.common.test_output_path('eval.txt')\n    cmd = (\n        '--loss-function', loss_function,\n        '-f', train_path,\n        '-t', test_path,\n        '--column-description', cd_path,\n        '--boosting-type', boosting_type,\n        '-i', '10',\n        '-T', '4',\n        '-m', model_path,\n        '--eval-file', eval_path,\n        '--use-best-model', 'false',\n        '--classes-count', '4'\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n    calc_cmd = (\n        CATBOOST_PATH,\n        'calc',\n        '--input-path', test_path,\n        '--column-description', cd_path,\n        '-m', model_path,\n        '--output-path', formula_predict_path,\n        '--prediction-type', 'RawFormulaVal'\n    )\n    yatest.common.execute(calc_cmd)\n    assert(compare_evals(eval_path, formula_predict_path))\n    return [local_canonical_file(eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_lost_class(boosting_type, 
loss_function):\n labels = [0, 1, 2, 3]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--bootstrap-type', 'No',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 
'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Gradient'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_not_binarized', 'train_small'),\n '-t', data_file('adult_not_binarized', 'test_small'),\n '--column-description', data_file('adult_not_binarized', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--target-border', '0.5',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_without_test = yatest.common.test_output_path('model_without_test.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n base_cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '--counter-calc-method', 'SkipTest', # TODO(kirillovs): remove after setting SkipTest as default type\n '-w', '0.03',\n '-T', '4',\n )\n\n train_with_test_cmd = base_cmd + (\n '-t', data_file('adult', 'test_small'),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', train_with_test_cmd)\n\n train_without_test_cmd = base_cmd + (\n '-m', output_model_path_without_test,\n )\n execute_catboost_fit('CPU', train_without_test_cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n formula_predict_without_test_path = yatest.common.test_output_path('predict_without_test.eval')\n\n base_calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--prediction-type', 'RawFormulaVal'\n )\n calc_cmd = base_calc_cmd + (\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n )\n calc_cmd_without_test = base_calc_cmd + (\n '-m', output_model_path_without_test,\n '--output-path', formula_predict_without_test_path,\n )\n yatest.common.execute(calc_cmd)\n yatest.common.execute(calc_cmd_without_test)\n if 
loss_function == 'MAPE':\n # TODO(kirillovs): uncomment this after resolving MAPE problems\n # assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path), local_canonical_file(formula_predict_path)]\n else:\n assert(compare_evals(output_eval_path, formula_predict_path))\n assert(filecmp.cmp(formula_predict_without_test_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_cv(is_inverted, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_query(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_pairs(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('bad_cv_params', ['XX', 'YY', 'XY'])\ndef test_multiple_cv_spec(bad_cv_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if bad_cv_params == 'XX':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=False, n=4, k=7))\n elif bad_cv_params == 'XY':\n cmd += ('--cv', 
format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n elif bad_cv_params == 'YY':\n cmd += ('--cv', format_crossvalidation(is_inverted=True, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n else:\n raise Exception('bad bad_cv_params value:' + bad_cv_params)\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('error_type', ['0folds', 'fold_idx_overflow'])\ndef test_bad_fold_cv_spec(is_inverted, error_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n ('--cv:Inverted' if is_inverted else '--cv:Classical'),\n {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type],\n '--eval-file', output_eval_path,\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_empty_eval(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_time(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--has-time',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in 
LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n 
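# the eval file written below is checked against the canonical output at the end of the test\n    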
output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '40',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--leaf-estimation-iterations', '7',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_priors(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,'\n 'Counter:Prior=0',\n '--per-feature-ctr', '4:Borders:Prior=0.444,Counter:Prior=0.444;'\n '6:Borders:Prior=0.666,Counter:Prior=0.666;'\n '8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Buckets'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-len-multiplier', '1.5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nFSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 
'ShapValues', 'PredictionDiff']\nDATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr(fstr_type, boosting_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_normalized_model(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=True,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_target_border(fstr_type, grow_policy):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--target-border', '0.4')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_weights(fstr_type, grow_policy):\n return do_test_fstr(\n fstr_type,\n loss_function='RMSE',\n input_path=data_file('querywise', 'train'),\n cd_path=data_file('querywise', 'train.cd.weight'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_class_weights(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\ndef test_fstr_with_target_border_and_class_weights(fstr_type):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy='SymmetricTree',\n 
normalize=False,\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\ndef do_test_fstr(\n fstr_type,\n loss_function,\n input_path,\n cd_path,\n boosting_type,\n grow_policy,\n normalize,\n additional_train_params=()\n):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', input_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '-m', model_path\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n if fstr_type == 'PredictionDiff':\n with open(input_path) as input:\n fstr_pool_path = yatest.common.test_output_path('input.tsv')\n with open(fstr_pool_path, \"w\") as output:\n output.write(input.readline())\n output.write(input.readline())\n input_path = fstr_pool_path\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', input_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', output_fstr_path,\n '--fstr-type', fstr_type\n )\n\n if normalize:\n make_model_normalized(model_path)\n if not(\n fstr_type == 'PredictionValuesChange' or\n fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES\n ):\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\ndef make_model_normalized(model_path):\n yatest.common.execute([\n CATBOOST_PATH,\n 'normalize-model',\n '--model-path', model_path,\n '--output-model', model_path,\n '--set-scale', '0.5',\n '--set-bias', '0.125',\n ])\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_loss_change_fstr(loss_function):\n return do_test_loss_change_fstr(loss_function, normalize=False)\n\n\ndef test_loss_change_fstr_normalized():\n return do_test_loss_change_fstr('QueryRMSE', normalize=True)\n\n\ndef do_test_loss_change_fstr(loss_function, normalize):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')\n\n def add_loss_specific_params(cmd, fstr_mode):\n if loss_function in ['PairLogit', 'PairLogitPairwise']:\n cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))\n if fstr_mode:\n cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--column-description', data_file('querywise', 'train.cd'))\n return cmd\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '--learn-set', data_file('querywise', 'train'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', model_path\n )\n cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd_prefix = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)\n 
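# with a normalized model the standalone fstr run is expected to fail;\n    # otherwise its output must match the LossFunctionChange fstr file written during training\n    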
if normalize:\n make_model_normalized(model_path)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fit_output, fstr_output, rtol=1e-6))\n\n return [local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ranking_parameters', [\n {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},\n {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}\n])\ndef test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):\n model_path = yatest.common.test_output_path('model.bin')\n fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')\n fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')\n internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')\n internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')\n\n pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'\n pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')\n cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')\n has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()\n\n cmd = (\n '--use-best-model', 'false',\n '--learn-set', pool_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path,\n '--loss-function', ranking_parameters['loss-function']\n ) + has_header_suffix\n\n if ranking_parameters['loss-function'] == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_0,\n '--fstr-internal-file', internal_fstr_path_0,\n '--fstr-type', 'FeatureImportance')\n )\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_1,\n '--fstr-internal-file', internal_fstr_path_1,\n '--fstr-type', ranking_parameters['fstr-type'])\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', pool_path,\n '--column-description', cd_path,\n '--model-file', model_path,\n ) + has_header_suffix\n\n yatest.common.execute(\n fstr_cmd + ('--output-path', fstr_path_1,\n '--fstr-type', 'FeatureImportance')\n )\n yatest.common.execute(\n fstr_cmd + ('--output-path', internal_fstr_path_1,\n '--fstr-type', 'InternalFeatureImportance')\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loss_change_fstr_without_pairs(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path\n\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', 
data_file('querywise', 'train.cd'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n\n try:\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--model-file', model_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n except:\n return [local_canonical_file(output_fstr_path)]\n\n assert False\n\n\ndef test_loss_change_fstr_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')\n output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')\n train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', get_pool_path('train', True),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '-i', '10',\n '-T', '4',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train'),\n '--column-description', cd_file,\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_dsv_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train', True),\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_quantized_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\\t')\n fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\\t')\n train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))\n assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('grow_policy', GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '25',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert 
filecmp.cmp(eval_1, eval_4)\n\n\nBORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']\n\n\[email protected]('border_type', BORDER_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_border_types(border_type, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--feature-border-type', border_type,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('depth', [4, 8])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_deep_tree_classification(depth, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--depth', str(depth),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n '--l2-leaf-reg', '5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nREG_LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',\n 'Huber:delta=1.0']\n\n\[email protected]('loss_function', REG_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 
'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n\n if boosting_type == 'Plain':\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nBORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']\n\n\[email protected](\n 'border_type',\n BORDER_TYPES,\n ids=lambda border_type: 'border_type=%s' % border_type\n)\[email protected](\n 'border_count',\n [1, 3, 10],\n ids=lambda border_count: 'border_count=%d' % border_count\n)\[email protected](\n 'boosting_type',\n BOOSTING_TYPE,\n ids=lambda boosting_type: 'boosting_type=%s' % boosting_type\n)\ndef test_ctr_target_quantization(border_type, border_count, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Borders:TargetBorderType=' + border_type,\n '--ctr-target-border-count', str(border_count)\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCOUNTER_METHODS = ['Full', 'SkipTest']\n\n\[email protected]('counter_calc_method', COUNTER_METHODS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_counter_calc(counter_calc_method, boosting_type):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '60',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--counter-calc-method', counter_calc_method\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']\n\n\[email protected]('ctr_type', CTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', ctr_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_overfitting_detector_metric(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC:hints=skip_train~false',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_same_metric_skip_different(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path_with_custom_metric = yatest.common.test_output_path('test_error_with_custom_metric.tsv')\n learn_error_path_with_custom_metric = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n ]\n\n cmd_without_custom_metric = cmd + 
[\n '--eval-metric', 'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n ]\n cmd_with_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~true',\n '--custom-metric', 'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path_with_custom_metric,\n '--test-err-log', test_error_path_with_custom_metric,\n ]\n\n execute_catboost_fit('CPU', cmd_without_custom_metric)\n execute_catboost_fit('CPU', cmd_with_custom_metric)\n\n assert filecmp.cmp(learn_error_path_with_custom_metric, learn_error_path)\n\n\[email protected]('loss_function', BINCLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_classification(loss_function, boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n custom_metrics = [\n metric for metric in\n [\n 'AUC:hints=skip_train~false',\n 'Logloss',\n 'CrossEntropy',\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'BalancedAccuracy',\n 'BalancedErrorRate',\n 'Kappa',\n 'WKappa',\n 'BrierScore',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss',\n 'NormalizedGini'\n ]\n if metric != loss_function\n ]\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', ','.join(custom_metrics),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loglikelihood_of_prediction(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'LogLikelihoodOfPrediction',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path, diff_tool(1e-7)), local_canonical_file(test_error_path, diff_tool(1e-7))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_multiclassification(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n 
'--custom-metric',\n 'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_prediction_type(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'Probability'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_no_target(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n fit_output_eval_path = yatest.common.test_output_path('fit_test.eval')\n calc_output_eval_path = yatest.common.test_output_path('calc_test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--counter-calc-method', 'SkipTest',\n '--eval-file', fit_output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('train_notarget.cd'),\n '-m', model_path,\n '--output-path', calc_output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(fit_output_eval_path, calc_output_eval_path))\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_classification_progress_restore(boosting_type):\n\n def run_catboost(iters, model_path, eval_path, additional_params=None):\n import random\n import shutil\n import string\n letters = string.ascii_lowercase\n train_random_name = ''.join(random.choice(letters) for i in xrange(8))\n shutil.copy(data_file('adult', 'train_small'), train_random_name)\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', train_random_name,\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', str(iters),\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n execute_catboost_fit('CPU', cmd)\n\n canon_model_path = yatest.common.test_output_path('canon_model.bin')\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n run_catboost(30, canon_model_path, canon_eval_path)\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n run_catboost(15, model_path, eval_path, 
additional_params=['--snapshot-file', progress_path])\n run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])\n assert filecmp.cmp(canon_eval_path, eval_path)\n # TODO(kirillovs): make this active when progress_file parameter will be deleted from json params\n # assert filecmp.cmp(canon_model_path, model_path)\n\n\[email protected]('loss_function', CLASSIFICATION_LOSSES)\[email protected]('prediction_type', PREDICTION_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_prediction_type(prediction_type, loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--prediction-type', prediction_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_const_feature(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = yatest.common.test_output_path('train_small')\n test_path = yatest.common.test_output_path('test_small')\n train_dataset = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\\t')\n test_dataset = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\\t')\n train_dataset[:, 14] = '0'\n test_dataset[:, 14] = '0'\n np.savetxt(train_path, train_dataset, fmt='%s', delimiter='\\t')\n np.savetxt(test_path, test_dataset[:10, :], fmt='%s', delimiter='\\t')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nQUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']\n\n\[email protected]('loss_function', QUANTILE_LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_quantile_targets(loss_function, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function + ':alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_targets_exact(boosting_type):\n 
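# Quantile:alpha=0.9 training with the Exact leaf estimation method; the eval file is canonized\n    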
output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_weights(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('higgs', 'train_small'),\n '-t', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_categorical(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_quantile_exact_distributed():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MAE',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n other_options=(\n '--leaf-estimation-method', 'Exact',\n '--boost-from-average', 'False'\n )\n )))]\n\n\nCUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',\n 'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',\n 'FairLoss:smoothness=0.9']\n\n\[email protected]('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss(custom_loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '50',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric', custom_loss_function,\n '--learn-err-log', learn_error_path,\n '--test-err-log', 
test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n eps = 0 if 'MSLE' not in custom_loss_function else 1e-9\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool(eps)),\n local_canonical_file(test_error_path, diff_tool=diff_tool(eps))]\n\n\ndef test_train_dir():\n output_model_path = 'model.bin'\n output_eval_path = 'test.eval'\n train_dir_path = 'trainDir'\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--train-dir', train_dir_path,\n '--fstr-file', 'fstr.tsv',\n '--fstr-internal-file', 'ifstr.tsv'\n )\n execute_catboost_fit('CPU', cmd)\n outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path, output_eval_path, 'fstr.tsv', 'ifstr.tsv']\n for output in outputs:\n assert os.path.isfile(train_dir_path + '/' + output)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n borders_file = yatest.common.test_output_path('borders.tsv')\n borders_file_output = borders_file + '.out'\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--output-borders-file': borders_file_output,\n }\n\n params_binarized = dict(params)\n params_binarized['--input-borders-file'] = borders_file_output\n params_binarized['--output-borders-file'] = borders_file\n params_binarized['-m'] = output_model_path_binarized\n\n execute_catboost_fit(task_type='CPU', params=params)\n\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n execute_catboost_fit(\n task_type='CPU',\n params=params_binarized,\n )\n\n apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)\n apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)\n\n assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))\n assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(predictions_path_test),\n 
local_canonical_file(predictions_path_learn),\n local_canonical_file(borders_file)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_id_fstr(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train_with_id.cd'),\n '-m', model_path,\n '-o', output_fstr_path,\n )\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-names', '1,0'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,1.,0.25,0.75'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass_last_class_missed(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,0.25,0.75,1.',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n 
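# Logloss on the adult pool with per-class weights 0.5 and 2; predictions are canonized\n    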
output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_params_from_file(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '6',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--params-file', data_file('params.json')\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_lost_class(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_with_lost_class(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n 
'--class-weights', '0.5,2,2',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_one_hot(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--one-hot-max-size', '10'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', calc_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(output_eval_path, calc_eval_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_random_strength(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--random-strength', '100'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult_all_categorical.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n 
ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('used_ram_limit', ['1Kb', '4Gb'])\[email protected](\n 'dev_score_calc_obj_block_size',\n ['600', '5000000'],\n ids=['calc_block=600', 'calc_block=5000000']\n)\ndef test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--used-ram-limit', used_ram_limit,\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '--depth', '7',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'ignored_features',\n [True, False],\n ids=['ignored_features=True', 'ignored_features=False']\n)\ndef test_apply_with_permuted_columns(ignored_features):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if ignored_features:\n cmd += ('--ignore-features', '0:2:5')\n\n execute_catboost_fit('CPU', cmd)\n\n permuted_test_path, permuted_cd_path = permute_dataset_columns(\n data_file('airlines_5K', 'test'),\n data_file('airlines_5K', 'cd'),\n seed=123)\n\n permuted_predict_path = yatest.common.test_output_path('permuted_predict.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', permuted_test_path,\n '--has-header',\n '--column-description', permuted_cd_path,\n '-m', output_model_path,\n '--output-path', permuted_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal,Label'\n )\n yatest.common.execute(calc_cmd)\n assert filecmp.cmp(output_eval_path, permuted_predict_path)\n\n\[email protected]('boosting_type, grow_policy', 
BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTreeLevel',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n if grow_policy == 'Lossguide':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n else:\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', 
'4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--bagging-temperature', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_plain(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--boosting-type', 'Plain',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n bootstrap_option = {\n 'no': ('--bootstrap-type', 'No',),\n 'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),\n 'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)\n }\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n )\n for bootstrap in bootstrap_option:\n model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')\n eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')\n execute_catboost_fit('CPU', cmd + ('-m', model_path, '--eval-file', eval_path,) + bootstrap_option[bootstrap])\n\n ref_eval_path = yatest.common.test_output_path('test_no.eval')\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_json_logging():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_json_logging_metric_period():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = 
(\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n '--metric-period', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_output_columns_format():\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n # Intentionally skipped: -t ...\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--output-columns', 'SampleId,RawFormulaVal,#2,Label',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path, formula_predict_path)\n\n\ndef test_eval_period():\n model_path = yatest.common.test_output_path('adult_model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--eval-period', '2'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(formula_predict_path)\n\n\ndef test_weights_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_from_file_output():\n 
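# trains once with the baseline stored as a Baseline column in the cd, then once with baselines passed via --learn-baseline/--test-baseline files; compare_evals below checks that both prediction files agree\n 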
output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--learn-baseline', data_file('higgs', 'train_baseline'),\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '-i', '10',\n '--ignore-features', '0', # baseline column\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_1_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_group_weight_output():\n model_path = yatest.common.test_output_path('model.bin')\n fit_eval_path = yatest.common.test_output_path('test_0.eval')\n calc_eval_path = yatest.common.test_output_path('test_1.eval')\n\n fit_cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'QueryRMSE',\n '--learn-set', data_file('querywise', 'train'),\n '--test-set', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '-i', '10',\n '-m', model_path,\n '--eval-file', fit_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(fit_cmd)\n fit_eval = pd.read_csv(fit_eval_path, sep='\\t')\n test_group_weight = pd.read_csv(data_file('querywise', 'test'), sep='\\t', header=None)[0]\n assert 'GroupWeight' in fit_eval.columns\n assert np.allclose(fit_eval['GroupWeight'], test_group_weight)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--output-path', calc_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(calc_cmd)\n calc_eval = pd.read_csv(calc_eval_path, sep='\\t')\n assert 'GroupWeight' in calc_eval.columns\n assert np.allclose(calc_eval['GroupWeight'], test_group_weight)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_from_file(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path_0 = yatest.common.test_output_path('test_0.eval')\n output_eval_path_1 = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path_0,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 
'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.,0.25,0.5,0.75',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n\n try:\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.5,0.25,0.75.,0.',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n except:\n return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]\n\n assert False\n\n\ndef test_baseline_from_file_output_on_quantized_pool():\n output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n )\n execute_catboost_fit('CPU', cmd + ('-i', '10'))\n execute_catboost_fit('CPU', cmd + (\n '-i', '10',\n '--learn-baseline', eval_0_path,\n '--test-baseline', eval_0_path,\n '--eval-file', eval_0_path))\n\n execute_catboost_fit('CPU', cmd + (\n '-i', '20',\n '--eval-file', eval_1_path))\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_query_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_subgroup_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.subgroup_id'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n return 
[local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-w', '0.1',\n '--one-hot-max-size', '102',\n '--bootstrap-type', 'No',\n '--random-strength', '0',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):\n pool_path = schema + data_file(pool, train)\n test_path = test_schema + data_file(pool, test)\n cd_path = data_file(pool, cd)\n cmd = (\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--random-strength', '0',\n '--has-time',\n '--bootstrap-type', 'No',\n '--boosting-type', 'Plain',\n )\n if dev_score_calc_obj_block_size:\n cmd += ('--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size)\n return cmd + other_options\n\n\ndef run_dist_train(cmd, output_file_switch='--eval-file'):\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n execute_catboost_fit('CPU', cmd + (output_file_switch, eval_0_path,))\n\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n execute_dist_train(cmd + (output_file_switch, eval_1_path,))\n\n eval_0 = np.loadtxt(eval_0_path, dtype='float', delimiter='\\t', skiprows=1)\n eval_1 = np.loadtxt(eval_1_path, dtype='float', delimiter='\\t', skiprows=1)\n assert(np.allclose(eval_0, eval_1, atol=1e-5))\n return eval_1_path\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n 
dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='test_small',\n cd='train.cd',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\[email protected]('target', ['PairLogitPairwise', 'QuerySoftMax'])\ndef test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=target,\n pool='querywise',\n train='train_x128_greedylogsum_aqtaa.bin',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-pairs', data_file('querywise', pairs_file)))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train.quantized',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-group-weights', data_file('querywise', 'train.group_weights')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='train_small_x128_greedylogsum.bin',\n cd='train_baseline.cd',\n schema='quantized://',\n test_schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', 
'--feature-border-type', 'GreedyLogSum',\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '--learn-baseline', data_file('higgs', 'train_baseline')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_queryrmse(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_subgroup(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--eval-metric', 'PFound')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_pairlogit(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogit',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.query_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))\n )))]\n\n\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_dist_train_pairlogitpairwise(pairs_file):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogitPairwise',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd',\n other_options=('--learn-pairs', data_file('querywise', pairs_file))\n )))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_querysoftmax(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QuerySoftMax',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n other_options=('--eval-metric', 'AUC')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc_weight(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](reason='Boost from average for distributed training')\[email protected]('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])\ndef test_dist_train_snapshot(schema, train):\n train_cmd = make_deterministic_train_cmd(\n 
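# the single-host run below trains 10 trees in one go, while the distributed run trains 5 trees and then resumes 5 more from the snapshot; filecmp checks the resulting evals match\n 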
loss_function='RMSE',\n pool='higgs',\n train=train,\n test='test_small',\n schema=schema,\n cd='train.cd')\n\n eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')\n execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))\n\n snapshot_path = yatest.common.test_output_path('snapshot')\n execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))\n\n eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')\n execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))\n\n assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))\n return [local_canonical_file(eval_5_plus_5_trees_path)]\n\n\ndef test_dist_train_yetirank():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='YetiRank',\n pool='querywise',\n train='repeat_same_query_8_times',\n test='repeat_same_query_8_times',\n cd='train.cd'\n ), output_file_switch='--test-err-log'))]\n\n\ndef test_no_target():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n pairs_path = yatest.common.test_output_path('pairs')\n\n np.savetxt(train_path, [[0], [1], [2], [3], [4]], delimiter='\\t', fmt='%.4f')\n np.savetxt(cd_path, [('0', 'Num')], delimiter='\\t', fmt='%s')\n np.savetxt(pairs_path, [[0, 1], [0, 2], [0, 3], [2, 4]], delimiter='\\t', fmt='%i')\n\n cmd = (\n '-f', train_path,\n '--cd', cd_path,\n '--learn-pairs', pairs_path\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('loss_function', ALL_LOSSES)\ndef test_const_target(loss_function):\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n np.savetxt(\n train_path,\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 2],\n [0, 0, 3],\n [0, 0, 4]],\n delimiter='\\t',\n fmt='%.4f'\n )\n np.savetxt(cd_path, [('0', 'Target'), ('1', 'GroupId')], delimiter='\\t', fmt='%s')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_negative_weights():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write('0\\tNum\\n1\\tWeight\\n2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, -1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_zero_learning_rate():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write(\n '0\\tNum\\n'\n '1\\tNum\\n'\n '2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, 1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n '--learning-rate', '0.0',\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=()):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', metric,\n '-f', train,\n '-t', 
test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])\ndef test_eval_metrics(metric, metric_period):\n if metric == 'PFound':\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'QueryRMSE'\n elif metric == 'PairAccuracy':\n # note: pairs are autogenerated\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'PairLogitPairwise'\n else:\n train, test, cd, loss_function = data_file('adult', 'train_small'), data_file('adult', 'test_small'), data_file('adult', 'train.cd'), 'Logloss'\n\n return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)\n\n\ndef test_eval_metrics_with_target_border():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_eval_metrics_with_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult', 'train_small'),\n test=data_file('adult', 'test_small'),\n cd=data_file('adult', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_eval_metrics_with_target_border_and_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef test_eval_metrics_with_boost_from_average_and_model_shrinkage(config):\n mode, rate, lr = config\n train = data_file('higgs', 'train_small')\n test = data_file('higgs', 'test_small')\n cd = data_file('higgs', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Logloss',\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', 
test_error_path,\n '--use-best-model', 'false',\n '--metric-period', '10',\n '--learn-err-log', learn_error_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--boost-from-average', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n test_eval_path = yatest.common.test_output_path('test_output.tsv')\n learn_eval_path = yatest.common.test_output_path('learn_output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', train,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', learn_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', test_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n test_first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1:], 8)\n test_second_metrics = np.round(np.loadtxt(test_eval_path, skiprows=1)[:, 1:], 8)\n learn_first_metrics = np.round(np.loadtxt(learn_error_path, skiprows=1)[:, 1:], 8)\n learn_second_metrics = np.round(np.loadtxt(learn_eval_path, skiprows=1)[:, 1:], 8)\n assert test_first_metrics[-1] == test_second_metrics[-1]\n assert learn_first_metrics[-1] == learn_second_metrics[-1]\n\n\[email protected]('metrics', ['AUC', 'AUC,Precision'])\ndef test_eval_metrics_with_binarized_target(metrics):\n train = data_file('adult', 'train_small')\n test = data_file('adult', 'test_small')\n cd = data_file('adult', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--target-border', '0.25',\n '--custom-metric', metrics,\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metrics,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('dataset', ['cloudness_small', 'cloudness_lost_class'])\ndef test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):\n if metric in MULTICLASS_LOSSES and metric != loss_function:\n # MultiClass and MultiClassOneVsAll are incompatible\n return\n\n train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', 
metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '3',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n start_index = 1 if metric == loss_function else 2\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\ndef test_eval_metrics_class_names():\n labels = ['a', 'b', 'c', 'd']\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'MultiClass',\n '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--class-names', ','.join(labels),\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats'\n )\n execute_catboost_fit('CPU', cmd)\n yatest.common.execute(eval_cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy', 'AUC:type=Ranking'])\ndef test_eval_metrics_with_baseline(metric_period, metric):\n train = data_file('adult_weight', 'train_weight')\n test = data_file('adult_weight', 'test_weight')\n cd = data_file('train_adult_baseline.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n )\n 
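# the standalone eval-metrics invocation below is expected to reproduce the training-time test error log; values are compared after rounding to 8 decimal places\n 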
execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy'])\ndef test_eval_metrics_multiclass_with_baseline(metric_period, metric):\n labels = [0, 1, 2, 3]\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '--eval-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr-leaf-count-limit', '10',\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])\ndef test_boost_from_average(boosting_type, 
grow_policy, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')\n output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')\n output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')\n baselined_train = yatest.common.test_output_path('baselined_train')\n baselined_test = yatest.common.test_output_path('baselined_test')\n baselined_cd = yatest.common.test_output_path('baselined.cd')\n\n train_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n original_cd = data_file('adult', 'train.cd')\n\n # use float32 because we use float in C++\n sum_target = np.float32(0)\n obj_count = np.float32(0)\n with open(train_path) as train_f:\n for line in train_f:\n obj_count += 1\n sum_target += np.float32(line.split()[1])\n\n mean_target = sum_target / obj_count\n if loss_function in ['Logloss', 'CrossEntropy']:\n mean_target = -np.log(1 / mean_target - 1)\n mean_target_str = str(mean_target)\n\n def append_baseline_to_pool(source, target):\n with open(source) as source_f, open(target, 'w') as target_f:\n for line in source_f:\n target_f.write(line.rstrip('\\n') + '\\t' + mean_target_str + '\\n')\n\n append_baseline_to_pool(train_path, baselined_train)\n append_baseline_to_pool(test_path, baselined_test)\n\n with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:\n for line in cd_input:\n cd_output.write(line)\n cd_output.write('18\\tBaseline\\n')\n\n base_cmd = (\n '--loss-function', loss_function,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', base_cmd + (\n '-f', baselined_train,\n '-t', baselined_test,\n '--boost-from-average', '0',\n '--column-description', baselined_cd,\n '--eval-file', output_eval_path_with_baseline,\n ))\n execute_catboost_fit('CPU', base_cmd + (\n '-f', train_path,\n '-t', test_path,\n '--boost-from-average', '1',\n '--column-description', original_cd,\n '--eval-file', output_eval_path_with_avg,\n ))\n yatest.common.execute((\n CATBOOST_PATH, 'calc',\n '--cd', original_cd,\n '--input-path', test_path,\n '-m', output_model_path,\n '-T', '1',\n '--output-path', output_calc_eval_path,\n ))\n\n assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)\n assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)\n return [local_canonical_file(output_eval_path_with_avg)]\n\n\[email protected]('eval_period', ['1', '2'])\ndef test_eval_non_additive_metric(eval_period):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_path,\n '--eval-period', eval_period,\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n output_eval_in_parts = 
yatest.common.test_output_path('eval_in_parts.eval')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_in_parts,\n '--eval-period', eval_period,\n '--calc-on-parts',\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.loadtxt(output_eval_path, skiprows=1)\n second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('max_ctr_complexity', [1, 2])\ndef test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):\n one_hot_max_size = 2\n cd_path = yatest.common.test_output_path('cd.txt')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n model_path = yatest.common.test_output_path('model.bin')\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n np.savetxt(cd_path, [['0', 'Target'],\n ['1', 'Categ'],\n ['2', 'Categ']\n ], fmt='%s', delimiter='\\t')\n np.savetxt(train_path, [['1', 'A', 'X'],\n ['1', 'B', 'Y'],\n ['1', 'C', 'Y'],\n ['0', 'A', 'Z'],\n ['0', 'B', 'Z'],\n ], fmt='%s', delimiter='\\t')\n np.savetxt(test_path, [['1', 'A', 'Y'],\n ['1', 'D', 'U'],\n ['1', 'D', 'U']\n ], fmt='%s', delimiter='\\t')\n cmd_fit = ('--loss-function', 'Logloss',\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--cd', cd_path,\n '-f', train_path,\n '-t', test_path,\n '-m', model_path,\n '--eval-file', test_eval_path,\n '-i', '5',\n '-T', '1',\n '--max-ctr-complexity', str(max_ctr_complexity),\n '--one-hot-max-size', str(one_hot_max_size),\n )\n cmd_calc = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_path,\n '-m', model_path,\n '-T', '1',\n '--output-path', calc_eval_path,\n )\n execute_catboost_fit('CPU', cmd_fit)\n yatest.common.execute(cmd_calc)\n assert(compare_evals(test_eval_path, calc_eval_path))\n\n\ndef do_test_object_importances(pool, loss_function, additional_train_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n object_importances_path = yatest.common.test_output_path('object_importances.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '10',\n '--boosting-type', 'Plain',\n '-T', '4',\n '-m', output_model_path,\n '--use-best-model', 'false'\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'ostr',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path,\n '-o', object_importances_path,\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(object_importances_path)]\n\n\[email protected]('loss_function', ['RMSE', 'Logloss', 'Poisson'])\[email protected]('leaf_estimation_iteration', ['1', '2'])\ndef test_object_importances(loss_function, leaf_estimation_iteration):\n additional_train_params = (\n '--leaf-estimation-method', 'Gradient',\n '--leaf-estimation-iterations', leaf_estimation_iteration\n )\n return do_test_object_importances(\n pool='adult',\n 
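# do_test_object_importances (defined above) fits a model and then runs the 'ostr' mode to compute per-object importances, canonizing its output\n 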
loss_function=loss_function,\n additional_train_params=additional_train_params\n )\n\n\ndef test_object_importances_with_target_border():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_object_importances_with_class_weights():\n return do_test_object_importances(\n pool='adult',\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_object_importances_with_target_border_and_class_weights():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\n# Create `num_tests` test files from `test_input_path`.\ndef split_test_to(num_tests, test_input_path):\n test_input_lines = open(test_input_path).readlines()\n test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]\n for testno in range(num_tests):\n test_path = test_paths[testno]\n test_lines = test_input_lines[testno::num_tests]\n open(test_path, 'wt').write(''.join(test_lines))\n return test_paths\n\n\n# Create a few shuffles from list of test files, for use with `-t` option.\ndef create_test_shuffles(test_paths, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n num_tests = len(test_paths)\n num_shuffles = num_tests # if num_tests < 3 else num_tests * (num_tests - 1)\n test_shuffles = set()\n while len(test_shuffles) < num_shuffles:\n test_shuffles.add(tuple(prng.permutation(test_paths)))\n return [','.join(shuffle) for shuffle in test_shuffles]\n\n\ndef fit_calc_cksum(fit_stem, calc_stem, test_shuffles):\n import hashlib\n last_cksum = None\n for i, shuffle in enumerate(test_shuffles):\n model_path = yatest.common.test_output_path('model{}.bin'.format(i))\n eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))\n execute_catboost_fit('CPU', fit_stem + (\n '-t', shuffle,\n '-m', model_path,\n ))\n yatest.common.execute(calc_stem + (\n '-m', model_path,\n '--output-path', eval_path,\n ))\n cksum = hashlib.md5(open(eval_path).read()).hexdigest()\n if last_cksum is None:\n last_cksum = cksum\n continue\n assert(last_cksum == cksum)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_order_independent(boosting_type, num_tests):\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = (\n '--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (\n CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n fit_stem = (\n '--loss-function', 
'QueryRMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\ndef test_multiple_eval_sets_no_empty():\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = ('--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n test0_path = yatest.common.test_output_path('test0.txt')\n open(test0_path, 'wt').write('')\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', fit_stem + (\n '-t', ','.join((test_input_path, test0_path))\n ))\n\n\[email protected]('loss_function', ['RMSE', 'QueryRMSE'])\ndef test_multiple_eval_sets(loss_function):\n num_tests = 5\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n eval_path = yatest.common.test_output_path('test.eval')\n test_paths = list(reversed(split_test_to(num_tests, test_input_path)))\n cmd = ('--loss-function', loss_function,\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(eval_path)]\n\n\ndef test_multiple_eval_sets_err_log():\n num_tests = 3\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n test_err_log_path = yatest.common.test_output_path('test-err.log')\n json_log_path = yatest.common.test_output_path('json.log')\n test_paths = reversed(split_test_to(num_tests, test_input_path))\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_err_log_path,\n '--json-log', json_log_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(test_err_log_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\n# Cast<float>(CityHash('Quvena')) is QNaN\n# Cast<float>(CityHash('Sineco')) is SNaN\[email protected]('cat_value', ['Normal', 'Quvena', 'Sineco'])\ndef test_const_cat_feature(cat_value):\n\n def make_a_set(nrows, value, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n label = prng.randint(0, nrows, [nrows, 1])\n feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))\n return np.concatenate([label, feature], axis=1)\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=20181219)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n 
eval_path = yatest.common.test_output_path('eval.txt')\n\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_model_metadata():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-w', '0.1',\n '--set-metadata-from-freeargs',\n 'A', 'A',\n 'BBB', 'BBB',\n 'CCC', 'A'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(output_model_path)\n\n assert 'A' == py_catboost.get_metadata()['A']\n assert 'BBB' == py_catboost.get_metadata()['BBB']\n assert 'CCC' == py_catboost.get_metadata()['CCC']\n\n\ndef test_fit_multiclass_with_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--use-best-model', 'false',\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', eval_path\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_extract_multiclass_labels_from_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n 
'--column-description', cd_path,\n '-T', '4',\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])\ndef test_save_class_labels_from_data(loss_function):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n if loss_function in MULTICLASS_LOSSES:\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n elif loss_function == 'Logloss':\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n else:\n assert 'class_params' not in py_catboost.get_metadata()\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_apply_multiclass_labels_from_data(prediction_type):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = 
yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type,\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n if prediction_type in ['Probability', 'RawFormulaVal']:\n with open(eval_path, \"rt\") as f:\n for line in f:\n assert line[:-1] == 'SampleId\\t{}:Class=0.0\\t{}:Class=7.0\\t{}:Class=9999.0\\t{}:Class=10000000.0' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n break\n else: # Class\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in labels\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', loss_function,\n '--classes-count', '4',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type\n )\n\n yatest.common.execute(calc_cmd)\n\n if prediction_type == 'RawFormulaVal':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 
'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity\n\n if prediction_type == 'Probability':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert (abs(float(line[:-1].split()[1])) < 1e-307\n and abs(float(line[:-1].split()[4])) < 1e-307) # fictitious probabilities must be virtually zero\n\n if prediction_type == 'Class':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_set_class_names_implicitly():\n INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']\n SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3, 4]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == SAVED_CLASS_LABELS\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n yatest.common.execute(calc_cmd)\n\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\t{}:Class=19.2\\t{}:Class=7.\\t{}:Class=8.0\\t{}:Class=a\\t{}:Class=bc\\tClass' \\\n .format(*(['RawFormulaVal'] * 5))\n else:\n label = line[:-1].split()[-1]\n assert label in SAVED_CLASS_LABELS\n\n return [local_canonical_file(eval_path)]\n\n\nCANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_multiclass_model_backward_compatibility(prediction_type):\n model = catboost.CatBoost()\n model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)\n\n assert 'class_params' not 
in model.get_metadata()\n\n pool = catboost.Pool(data_file('cloudness_small', 'train_small'),\n column_description=data_file('cloudness_small', 'train.cd'))\n model.predict(data=pool, prediction_type='Class')\n model.eval_metrics(data=pool, metrics=['Accuracy'])\n\n output_path = yatest.common.test_output_path('out.txt')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'train_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,\n '--prediction-type', prediction_type,\n '--output-path', output_path,\n )\n\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(output_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('use_best_model', ['true', 'false'])\ndef test_learning_rate_auto_set(boosting_type, use_best_model):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', use_best_model,\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_paths_with_dsv_scheme():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', 'dsv://' + data_file('querywise', 'train'),\n '-t', 'dsv://' + data_file('querywise', 'test'),\n '--column-description', 'dsv://' + data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_skip_train():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n json_log_path = yatest.common.test_output_path('json_log.json')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--json-log', json_log_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n 
'--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd', output_eval_path_first)\n run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['QueryRMSE', 'RMSE'])\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path_first)\n run_catboost('train', 'test', 'train.cd.weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n\ndef test_snapshot_without_random_seed():\n\n def run_catboost(iters, eval_path, additional_params=None):\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', str(iters),\n '-T', '4',\n '--use-best-model', 'False',\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n tmpfile = 'test_data_dumps'\n with open(tmpfile, 'w') as f:\n execute_catboost_fit('CPU', cmd, stdout=f)\n with open(tmpfile, 'r') as output:\n line_count = sum(1 for line in output)\n return line_count\n\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n additional_params = ['--snapshot-file', progress_path, '-m', model_path]\n\n first_line_count = run_catboost(15, eval_path, additional_params=additional_params)\n second_line_count = run_catboost(30, eval_path, additional_params=additional_params)\n third_line_count = run_catboost(45, eval_path, additional_params=additional_params)\n assert first_line_count == second_line_count == third_line_count\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n cb_model = catboost.CatBoost()\n cb_model.load_model(model_path)\n random_seed = cb_model.random_seed_\n run_catboost(45, canon_eval_path, 
additional_params=['-r', str(random_seed)])\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_interval():\n\n def run_with_timeout(cmd, timeout):\n try:\n execute_catboost_fit('CPU', cmd, timeout=timeout)\n except ExecutionTimeoutError:\n return True\n return False\n\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n ]\n\n measure_time_iters = 100\n exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', cmd + ['-i', str(measure_time_iters)]), number=1)\n\n SNAPSHOT_INTERVAL = 1\n TIMEOUT = 5\n TOTAL_TIME = 25\n iters = int(TOTAL_TIME / (exec_time / measure_time_iters))\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n canon_params = cmd + ['--eval-file', canon_eval_path, '-i', str(iters)]\n execute_catboost_fit('CPU', canon_params)\n\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n model_path = yatest.common.test_output_path('model.bin')\n params = cmd + ['--snapshot-file', progress_path,\n '--snapshot-interval', str(SNAPSHOT_INTERVAL),\n '-m', model_path,\n '--eval-file', eval_path,\n '-i', str(iters)]\n\n was_timeout = False\n while run_with_timeout(params, TIMEOUT):\n was_timeout = True\n assert was_timeout\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_different_params():\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n '-i', '10',\n '--snapshot-file', 'snapshot.cbp'\n ]\n\n cmd_1 = cmd + ['--eval-metric', 'Logloss']\n cmd_2 = cmd + ['--eval-metric', 'Accuracy']\n execute_catboost_fit('CPU', cmd_1)\n try:\n execute_catboost_fit('CPU', cmd_2)\n except ExecutionError:\n return\n\n assert False\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QuerySoftMax',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--leaf-estimation-method', leaf_estimation_method,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_shap_verbose():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n output_log = yatest.common.test_output_path('log')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n 
execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '12',\n '--fstr-type', 'ShapValues',\n '-T', '4',\n '-m', output_model_path,\n ]\n with open(output_log, 'w') as log:\n yatest.common.execute(cmd_shap, stdout=log)\n with open(output_log, 'r') as log:\n line_count = sum(1 for line in log)\n assert line_count == 5\n\n\ndef test_shap_approximate():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Approximate',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\ndef test_shap_exact():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Exact',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bernoulli_bootstrap(subsample, sampling_unit, 
dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':\n return pytest.xfail(reason='MLTOOLS-1801')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd, env=dict(MKL_CBWR='SSE4_2'))\n eps = 0 if yatest.common.context.sanitize is None else 0.1\n\n return [local_canonical_file(output_eval_path, 
diff_tool=diff_tool(eps))]\n\n\[email protected]('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])\[email protected]('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])\ndef test_bad_metrics_combination(loss_function, metric):\n BAD_PAIRS = {\n 'Logloss': ['RMSE', 'MultiClass'],\n 'RMSE': ['Logloss', 'MultiClass'],\n 'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],\n 'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],\n 'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],\n 'YetiRank': ['Logloss', 'RMSE', 'MultiClass']\n }\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\\t')\n\n data = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, data, fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, data, fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '4',\n '-T', '4',\n )\n\n try:\n execute_catboost_fit('CPU', cmd)\n except Exception:\n assert metric in BAD_PAIRS[loss_function]\n return\n\n assert metric not in BAD_PAIRS[loss_function]\n\n\[email protected]('metric', [('good', ',AUC,'), ('bad', ',')])\ndef test_extra_commas(metric):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', metric[1]\n )\n if metric[0] == 'good':\n execute_catboost_fit('CPU', cmd)\n if metric[0] == 'bad':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,\n border_count=128, other_options=()):\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--cd', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-x', str(border_count),\n '--feature-border-type', 'GreedyLogSum',\n '-m', model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd + other_options)\n\n\ndef test_quantized_pool():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_features():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n 
cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path,\n other_options=('-I', '5',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path,\n other_options=('-I', '5',)\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_groupid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_during_quantization():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n other_options=('-I', '18-36',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_quantized_test():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_with_large_grid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n border_count=1024\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n 
loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),\n test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_learn_without_header_eval_with_header():\n train_path = yatest.common.test_output_path('airlines_without_header')\n with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:\n with open(train_path, 'w') as without_header_file:\n without_header_file.writelines(with_header_file.readlines()[1:])\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd_fit = (\n '--loss-function', 'Logloss',\n '-f', train_path,\n '--cd', data_file('airlines_5K', 'cd'),\n '-i', '10',\n '-m', model_path\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('airlines_5K', 'test'),\n '--cd', data_file('airlines_5K', 'cd'),\n '-m', model_path,\n '--has-header'\n )\n yatest.common.execute(cmd_calc)\n\n\ndef test_group_weights_file():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, cd_file, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.cd', True)\n run_catboost(second_eval_path, 'train.cd.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_group_weights_file_quantized():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, train, test, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', 'quantized://' + data_file('querywise', train),\n '-t', 'quantized://' + data_file('querywise', test),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.quantized', 'test.quantized', True)\n run_catboost(second_eval_path, 'train.quantized.group_weight', 'test.quantized.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_mode_roc():\n eval_path = yatest.common.test_output_path('eval.tsv')\n output_roc_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--counter-calc-method', 'SkipTest',\n '--eval-file', eval_path,\n '--use-best-model', 
'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n roc_cmd = (\n CATBOOST_PATH,\n 'roc',\n '--eval-file', eval_path,\n '--output-path', output_roc_path\n )\n yatest.common.execute(roc_cmd)\n\n return local_canonical_file(output_roc_path)\n\n\[email protected]('pool', ['adult', 'higgs', 'adult_nan'])\ndef test_convert_model_to_json(pool):\n output_model_path = yatest.common.test_output_path('model')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '-m', output_model_path,\n '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',\n '--model-format', 'CatboostBinary,Json'\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path_bin = yatest.common.test_output_path('predict_test_bin.eval')\n formula_predict_path_json = yatest.common.test_output_path('predict_test_json.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.json',\n '--model-format', 'Json',\n '--output-path', formula_predict_path_json\n )\n yatest.common.execute(calc_cmd)\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.bin',\n '--output-path', formula_predict_path_bin\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_bin))\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_json))\n\n\nLOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']\n\n\[email protected]('loss_function', LOSS_FUNCTIONS_NO_MAPE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_adult_pool(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')\n quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', quantized_train_file,\n '-t', quantized_test_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n cd_file = data_file('quantized_adult', 'pool.cd')\n test_file = data_file('quantized_adult', 'test_small.tsv')\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_with_one_thread(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n quantized_train_file = 'quantized://' + data_file('querywise', 'train.quantized')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', quantized_train_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '1',\n '-m', output_model_path,\n '--target-border', '0.5',\n )\n print(cmd)\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_eval_result_on_different_pool_type():\n output_eval_path = 
yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def run_catboost(train, test, eval_path):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--border-count', '128',\n '-f', train,\n '-t', test,\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--eval-file', eval_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n run_catboost(get_pool_path('train'), get_pool_path('test'), output_eval_path)\n run_catboost(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path)\n\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_apply_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', get_pool_path('train', True),\n '--test-set', get_pool_path('test', True),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test'),\n '--column-description', cd_file,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test', True),\n '--model-file', output_model_path,\n '--output-path', output_quantized_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n\n\ndef test_apply_output_column_by_idx():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n learn = data_file('black_friday', 'train')\n test = data_file('black_friday', 'test')\n cd = data_file('black_friday', 'cd')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '--learn-set', learn,\n '--test-set', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '--model-file', output_model_path,\n '--has-header'\n )\n execute_catboost_fit('CPU', cmd)\n\n column_names = [\n 'User_ID',\n 'Product_ID',\n 'Gender',\n 'Age',\n 'Occupation',\n 'City_Category',\n 'Stay_In_Current_City_Years',\n 'Marital_Status',\n 'Product_Category_1',\n 'Product_Category_2',\n 'Product_Category_3',\n 'Purchase'\n ]\n output_columns = ','.join(['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)])\n output_columns = 'RawFormulaVal,' + output_columns\n\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', test,\n '--column-description', cd,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--output-columns', output_columns,\n '--has-header'\n )\n yatest.common.execute(cmd)\n\n with 
open(output_eval_path, 'r') as f:\n eval_lines = f.readlines()\n with open(test, 'r') as f:\n test_lines = f.readlines()\n\n assert len(eval_lines) == len(test_lines)\n for i in range(len(eval_lines)):\n eval_line = eval_lines[i].split('\\t')[1:] # skip RawFormulaVal\n test_line = test_lines[i].split('\\t')\n\n for eval_column, test_column in zip(eval_line, test_line):\n assert eval_column == test_column\n\n\n@pytest.mark.parametrize(\n 'dataset_name,loss_function,has_pairs,has_group_weights',\n [\n ('adult_small_broken_features', 'Logloss', False, False),\n ('querywise_broken_pairs', 'RMSE', True, False),\n ('querywise_broken_group_weights', 'RMSE', False, True),\n ]\n)\ndef test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n # iterations and threads are specified just to finish fast if the test unexpectedly passes (xpass)\n cmd = (\n '--loss-function', loss_function,\n '--learn-set', data_file('broken_format', dataset_name, 'train'),\n '--test-set', data_file('broken_format', dataset_name, 'test'),\n '--column-description', data_file('broken_format', dataset_name, 'train.cd'),\n '-i', '1',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if has_pairs:\n cmd += (\n '--learn-pairs', data_file('broken_format', dataset_name, 'train.pairs'),\n '--test-pairs', data_file('broken_format', dataset_name, 'test.pairs'),\n )\n if has_group_weights:\n cmd += (\n '--learn-group-weights', data_file('broken_format', dataset_name, 'train.group_weights'),\n '--test-group-weights', data_file('broken_format', dataset_name, 'test.group_weights'),\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\n@pytest.mark.use_fixtures('compressed_data')\n@pytest.mark.parametrize(\n 'loss_function,eval_metric,boosting_type',\n [\n ('QueryRMSE', 'NDCG', 'Plain'),\n ('QueryRMSE', 'NDCG', 'Ordered'),\n # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise\n ('YetiRankPairwise', 'NDCG', 'Plain'),\n ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ],\n ids=[\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',\n 'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'\n ]\n)\ndef test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', os.path.join(compressed_data.name, 'mslr_web1k', 'train'),\n '-t', os.path.join(compressed_data.name, 'mslr_web1k', 'test'),\n '--column-description', os.path.join(compressed_data.name, 'mslr_web1k', 'cd.with_cat_features'),\n '--boosting-type', boosting_type,\n '-i', '100',\n '-T', '8',\n '--eval-metric', eval_metric,\n '--metric-period', '100',\n '--use-best-model', 'false',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return 
[local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]\n\n\ndef test_gradient_walker():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n '--max-ctr-complexity', '4',\n '--leaf-estimation-iterations', '10',\n '--leaf-estimation-backtracking', 'AnyImprovement',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\n# training with pairwise scoring with categorical features on CPU does not yet support one-hot features\n# so they are disabled by default, explicit non-default specification should be an error\[email protected](\n 'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],\n ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']\n)\ndef test_groupwise_with_bad_one_hot_max_size(loss_function):\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-T', '4',\n '--eval-metric', 'NDCG',\n '--one_hot_max_size', '10'\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_load_quantized_pool_with_double_baseline():\n # Dataset with 3 random columns, first column is Target, seconds columns is Num, third column\n # is Baseline.\n #\n # There are only 10 rows in dataset.\n cmd = (\n '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),\n '-i', '10')\n\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_write_predictions_to_streams():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval')\n\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--eval-file', output_eval_path,\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', output_model_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stdout',\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stdout:\n yatest.common.execute(calc_cmd, stdout=catboost_stdout)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stderr'\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stderr:\n yatest.common.execute(calc_cmd, stderr=catboost_stderr)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mvs_bootstrap(boosting_type):\n def run_catboost(eval_path, mvs_sample_rate):\n cmd = [\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '-f', data_file('airlines_5K', 
'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--bootstrap-type', 'MVS',\n '--subsample', mvs_sample_rate,\n '-i', '50',\n '-w', '0.03',\n '-T', '6',\n '-r', '0',\n '--leaf-estimation-iterations', '10',\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n ref_eval_path = yatest.common.test_output_path('test.eval')\n run_catboost(ref_eval_path, '0.5')\n\n for sample_rate in ('0.1', '0.9'):\n eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))\n run_catboost(eval_path, sample_rate)\n assert (filecmp.cmp(ref_eval_path, eval_path) is False)\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_simple_ctr():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n simple_ctr = ','.join((\n 'Borders:TargetBorderCount=15',\n 'Buckets:TargetBorderCount=15',\n 'Borders:TargetBorderType=MinEntropy',\n 'Counter:CtrBorderCount=20',\n ))\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--simple-ctr', simple_ctr,\n ))\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_output_options():\n output_options_path = 'training_options.json'\n train_dir = 'catboost_info'\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--train-dir', train_dir,\n '--training-options-file', output_options_path,\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(os.path.join(train_dir, output_options_path))\n\n\ndef test_target_border():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--target-border', '0.3'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_monotonic_constraint():\n train_pool = catboost.Pool(\n data_file('higgs', 'train_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n test_pool = catboost.Pool(\n data_file('higgs', 'test_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]\n model = catboost.CatBoostRegressor(\n n_estimators=100,\n learning_rate=0.2,\n monotone_constraints=monotone_constraints,\n verbose=False\n ).fit(train_pool, eval_set=test_pool)\n\n dummy_data = np.zeros((1, test_pool.num_col()))\n dummy_target = np.zeros(len(dummy_data))\n feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)\n for feature_index, feature_name in enumerate(model.feature_names_):\n monotonicity = monotone_constraints[feature_index]\n if monotonicity == 0:\n continue\n feature_borders = feature_stats[feature_name]['borders']\n if len(feature_borders) == 0:\n continue\n mid_values = 
(feature_borders[:-1] + feature_borders[1:]) / 2\n min_value = feature_borders[0] - 1\n max_value = feature_borders[-1] + 1\n feature_values = np.array([min_value] + list(mid_values) + [max_value])\n for obj in test_pool.get_features():\n obj_variations = np.zeros((len(feature_values), test_pool.num_col()))\n obj_variations[:] = obj.reshape((1, -1))\n obj_variations[:, feature_index] = feature_values\n model_predicts = model.predict(obj_variations)\n prediction_deltas = model_predicts[1:] - model_predicts[:-1]\n assert np.all(prediction_deltas * monotonicity >= 0)\n\n\ndef test_different_formats_of_monotone_constraints():\n eval_path = yatest.common.test_output_path('eval.tsv')\n eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')\n eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train_with_id.cd'),\n '-i', '20'\n ]\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])\n assert not filecmp.cmp(eval_path_with_monotone1, eval_path)\n\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n params_file = yatest.common.test_output_path(\"params.json\")\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:\n json.dump({'monotone_constraints': constraints}, open(params_file, 'w'))\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n\nclass TestModelWithoutParams(object):\n\n @pytest.fixture(\n params=[\n ('cut-info', 'RMSE'),\n ('cut-params', 'RMSE'),\n ('cut-info', 'QueryRMSE'),\n ('cut-params', 'QueryRMSE'),\n ],\n ids=lambda param: '-'.join(param),\n )\n def model_etc(self, request):\n cut, loss = request.param\n model_json = yatest.common.test_output_path('model.json')\n learn_set = data_file('querywise', 'train')\n test_set = data_file('querywise', 'test')\n cd = data_file('querywise', 'train.cd')\n cmd = (\n '--loss-function', loss,\n '--learn-set', learn_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--iterations', '10',\n '--model-file', model_json,\n '--model-format', 'Json',\n '--use-best-model', 'false'\n )\n execute_catboost_fit('CPU', cmd)\n model = json.load(open(model_json))\n if cut == 'cut-info':\n model.pop('model_info')\n if cut == 'cut-params':\n model['model_info'].pop('params')\n json.dump(model, open(model_json, 'wt'))\n return model_json, learn_set, test_set, cd\n\n def test_ostr(self, model_etc):\n model_json, train_set, test_set, cd = model_etc\n ostr_result = yatest.common.test_output_path('result.txt')\n ostr_cmd = (\n CATBOOST_PATH, 'ostr',\n '--learn-set', train_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', ostr_result,\n )\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(ostr_cmd)\n\n @pytest.mark.parametrize('should_fail,fstr_type', [\n (False, 'FeatureImportance'),\n (False, 'PredictionValuesChange'),\n (True, 
'LossFunctionChange'),\n (False, 'ShapValues'),\n ])\n def test_fstr(self, model_etc, fstr_type, should_fail):\n model_json, train_set, _, cd = model_etc\n fstr_result = yatest.common.test_output_path('result.txt')\n fstr_cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', train_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', fstr_result,\n '--fstr-type', fstr_type,\n )\n if should_fail:\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n else:\n yatest.common.execute(fstr_cmd)\n\n\ndef test_equal_feature_names():\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.equal_names'),\n ))\n\n\ndef enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):\n if eval_mode == 'OneVsOthers':\n baseline = 'Baseline_set_{set_idx}_fold_{fold_idx}'\n else:\n baseline = 'Baseline_fold_{fold_idx}'\n if not only_baseline:\n testing = 'Testing_set_{set_idx}_fold_{fold_idx}'\n dirs = []\n for set_idx in range(set_count):\n for fold_idx in range(offset, offset + fold_count):\n fold = baseline.format(fold_idx=fold_idx, set_idx=set_idx)\n if fold not in dirs:\n dirs += [fold]\n if not only_baseline:\n fold = testing.format(fold_idx=fold_idx, set_idx=set_idx)\n dirs += [fold]\n return dirs\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature(eval_mode, features_to_eval, offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('offset', [0, 2])\ndef test_eval_feature_empty_feature_set(offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n eval_mode = 'OneVsNone'\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n 
'--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = 1\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=True):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('fold_size_unit', ['Object', 'Group'])\ndef test_eval_feature_timesplit(eval_mode, fold_size_unit):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n features_to_eval = '2-5;10-15'\n offset = 2\n fold_size = 500\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', fold_size_unit,\n '--fold-size', str(fold_size),\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n '--learn-timestamps', data_file('querywise', 'train.timestamps'),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature_snapshot(eval_mode, features_to_eval, offset):\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n fold_count = 2\n snapshot_interval = 1\n\n def make_cmd(summary, train_dir):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '200',\n '-T', '4',\n '-w', '0.1',\n '--boost-from-average', 'False',\n '--permutations', '1',\n '--snapshot-interval', str(snapshot_interval),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '--feature-eval-output-file', summary,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n reference_summary = yatest.common.test_output_path('reference_feature.eval')\n reference_dir = 
yatest.common.test_output_path('reference')\n yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))\n\n snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')\n snapshot_dir = yatest.common.test_output_path('snapshot')\n snapshot = yatest.common.test_output_path('eval_feature.snapshot')\n eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n resume_from_snapshot_count = 15\n for idx in range(resume_from_snapshot_count):\n timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1\n stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)\n yatest.common.execute(['rm', '-rf', snapshot_dir])\n yatest.common.execute(eval_with_snapshot_cmd)\n\n assert filecmp.cmp(reference_summary, snapshot_summary)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))\n assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))\n\n\ndef test_eval_feature_snapshot_wrong_options():\n summary = yatest.common.test_output_path('eval_feature_summary')\n snapshot = yatest.common.test_output_path('eval_feature_snapshot')\n\n def make_cmd(fold_size):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', str(fold_size),\n '--snapshot-file', snapshot\n )\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n stop_after_timeout(cmd=make_cmd(fold_size=40), timeout=3)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd(fold_size=20))\n\n\ndef test_eval_feature_parse_timestamps():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd(timestamps_file):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--learn-timestamps', data_file('querywise', timestamps_file),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(make_cmd('train.timestamps'))\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd('train.group_weights'))\n\n\ndef test_eval_feature_relative_fold_size():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd():\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 
'train.cd'),\n '-i', '100',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--relative-fold-size', '0.1',\n )\n\n yatest.common.execute(make_cmd())\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd() + ('--fold-size', '40',))\n\n\nTEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']\n\n\[email protected]('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])\[email protected]('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('eval_metric_use_weights', [True, False, None],\n ids=['eval_weights=' + str(mode) for mode in [True, False, None]])\[email protected]('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('custom_metric_use_weights', [True, False, None],\n ids=['custom_weights=' + str(mode) for mode in [True, False, None]])\ndef test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n if dataset_has_weights:\n train_pool_filename = data_file('adult_weight', 'train_weight')\n test_pool_filename = data_file('adult_weight', 'test_weight')\n pool_cd_filename = data_file('adult_weight', 'train.cd')\n else:\n train_pool_filename = data_file('adult', 'train_small')\n test_pool_filename = data_file('adult', 'test_small')\n pool_cd_filename = data_file('adult', 'train.cd')\n\n eval_metric = eval_metric_loss\n if eval_metric == 'AUC':\n eval_metric += ':hints=skip_train~false'\n if eval_metric_use_weights is not None:\n eval_metric += ';' if eval_metric_loss == 'AUC' else ':'\n eval_metric += 'use_weights=' + str(eval_metric_use_weights)\n\n custom_metric = custom_metric_loss\n if custom_metric == 'AUC':\n custom_metric += ':hints=skip_train~false'\n if custom_metric_use_weights is not None:\n custom_metric += ';' if custom_metric_loss == 'AUC' else ':'\n custom_metric += 'use_weights=' + str(custom_metric_use_weights)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', train_pool_filename,\n '-t', test_pool_filename,\n '--cd', pool_cd_filename,\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-metric', eval_metric,\n '--custom-metric', custom_metric,\n )\n should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)\n try:\n execute_catboost_fit('CPU', cmd)\n except ExecutionError:\n assert should_fail\n return\n for filename in [learn_error_path, test_error_path]:\n with open(filename, 'r') as f:\n metrics_descriptions = f.readline().split('\\t')[1:] # without 'iter' column\n metrics_descriptions[-1] = metrics_descriptions[-1][:-1] # remove '\\n' symbol\n unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])\n assert len(metrics_descriptions) == len(unique_metrics_descriptions)\n expected_objective_metric_description = 'Logloss'\n\n if dataset_has_weights:\n expected_eval_metric_description = \\\n eval_metric_loss if 
eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)\n\n if custom_metric_loss == 'AUC':\n expected_custom_metrics_descriptions = \\\n ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]\n else:\n expected_custom_metrics_descriptions = (\n [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']\n if custom_metric_use_weights is None\n else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])\n else:\n expected_eval_metric_description = eval_metric_loss\n expected_custom_metrics_descriptions = [custom_metric_loss]\n assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_leafwise_scoring():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', cmd)\n learn_errors_log = open(learn_error_path).read()\n execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])\n new_learn_errors_log = open(learn_error_path).read()\n assert new_learn_errors_log == learn_errors_log\n\n\ndef test_group_features():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_predictions_path = yatest.common.test_output_path('test_predictions.tsv')\n model_path = yatest.common.test_output_path('model.bin')\n fit_cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '-m', model_path,\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', fit_cmd)\n calc_cmd = [\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', test_predictions_path,\n '--output-columns', 'Probability'\n ]\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_predictions_path)]\n\n\ndef test_model_sum():\n model_path = yatest.common.test_output_path('model.bin')\n model_eval = yatest.common.test_output_path('model_eval.txt')\n execute_catboost_fit('CPU', [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', model_path,\n '-t', data_file('adult', 'test_small'),\n '--eval-file', model_eval,\n '--output-columns', 'SampleId,RawFormulaVal',\n ])\n\n sum_path = yatest.common.test_output_path('sum.bin')\n yatest.common.execute([\n CATBOOST_PATH,\n 'model-sum',\n '--model-with-weight', '{}={}'.format(model_path, 0.75),\n '--model-with-weight', '{}={}'.format(model_path, 0.25),\n '--output-path', sum_path,\n ])\n\n sum_eval = yatest.common.test_output_path('sum_eval.txt')\n yatest.common.execute([\n CATBOOST_PATH,\n 'calc',\n '-m', sum_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', sum_eval,\n ])\n yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])\n\n\ndef test_external_feature_names():\n fstr_cd_with_id_path = 
yatest.common.test_output_path('fstr_cd_with_id.tsv')\n fstr_cd_without_id_path = yatest.common.test_output_path('fstr_cd_without_id.tsv')\n\n for cd_has_feature_names in [False, True]:\n if cd_has_feature_names:\n cd_file = data_file('adult', 'train_with_id.cd')\n fstr_path = fstr_cd_with_id_path\n else:\n cd_file = data_file('adult', 'train.cd')\n fstr_path = fstr_cd_without_id_path\n\n cmd = (\n '--loss-function', 'Logloss',\n '--target-border', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--feature-names-path', data_file('adult', 'feature_names'),\n '--fstr-type', 'FeatureImportance',\n '--fstr-file', fstr_path\n )\n execute_catboost_fit('CPU', cmd)\n\n assert filecmp.cmp(fstr_cd_with_id_path, fstr_cd_without_id_path)\n\n return [local_canonical_file(fstr_cd_with_id_path)]\n\n\ndef test_diffusion_temperature():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--langevin', 'True',\n '--diffusion-temperature', '1000',\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef test_model_shrink_correct(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])\ndef test_model_shrink_incorrect(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\n@pytest.mark.parametrize('average', ['Macro', 'Micro', 'Weighted'])\ndef test_total_f1_params(average):\n return do_test_eval_metrics(\n metric='TotalF1:average=' + average,\n metric_period='1',\n train=data_file('cloudness_small', 'train_small'),\n test=data_file('cloudness_small', 'test_small'),\n cd=data_file('cloudness_small', 'train.cd'),\n loss_function='MultiClass'\n )\n\n\ndef test_tweedie():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--loss-function', 'Tweedie:variance_power=1.5',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '-i', '100',\n '--learning-rate', '0.5',\n '--learn-err-log', learn_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
\n@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)\n@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)\ndef test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)\n@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)\n@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)\ndef test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n
'--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('grow_policy', GROW_POLICIES)\ndef test_shrink_model_with_text_features(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n loss_function = 'MultiClass'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--text-processing', json.dumps(text_processing),\n '--grow-policy', grow_policy,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'true',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('virtual_ensembles_count', ['1', '10'])\[email protected]('prediction_type', ['TotalUncertainty', 'VirtEnsembles'])\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty'])\ndef test_uncertainty_prediction(virtual_ensembles_count, prediction_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '--posterior-sampling', 'true',\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--virtual-ensembles-count', virtual_ensembles_count,\n '--prediction-type', prediction_type,\n )\n 
yatest.common.execute(calc_cmd)\n\n model = catboost.CatBoost()\n model.load_model(output_model_path)\n pool = catboost.Pool(test_path, column_description=cd_path)\n py_preds = model.virtual_ensembles_predict(\n pool,\n prediction_type=prediction_type,\n virtual_ensembles_count=int(virtual_ensembles_count))\n\n cli_preds = np.genfromtxt(\n formula_predict_path,\n delimiter='\\t',\n dtype=float,\n skip_header=True)\n assert(np.allclose(py_preds.reshape(-1,), cli_preds[:, 1:].reshape(-1,), rtol=1e-10))\n\n return local_canonical_file(formula_predict_path)\n\n\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty'])\ndef test_uncertainty_prediction_requirements(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'VirtEnsembles'\n )\n try:\n yatest.common.execute(calc_cmd)\n except:\n return\n # assert replaced to warning\n # assert False\n\n\nDICTIONARIES_OPTIONS = [\n {\n \"Simple\": \"token_level_type=Word:occurrence_lower_bound=50\"\n },\n {\n \"UniGramOccur5\": \"occurrence_lower_bound=5:token_level_type=Letter\",\n \"BiGramOccur2\": \"occurrence_lower_bound=2:gram_order=2:token_level_type=Letter\",\n \"WordDictOccur1\": \"occurrence_lower_bound=1:token_level_type=Word\",\n \"WordDictOccur2\": \"occurrence_lower_bound=2:token_level_type=Word\",\n \"WordDictOccur3\": \"occurrence_lower_bound=3:token_level_type=Word\"\n },\n {\n \"Unigram\": \"gram_order=1:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Bigram\": \"gram_order=2:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Trigram\": \"gram_order=3:token_level_type=Letter:occurrence_lower_bound=50\"\n },\n {\n \"Letter\": \"token_level_type=Letter:occurrence_lower_bound=50\",\n \"Word\": \"token_level_type=Word:occurrence_lower_bound=50\"\n }\n]\n\n\[email protected]('dictionaries', DICTIONARIES_OPTIONS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_text_processing_options(dictionaries, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])\n feature_estimators = 'BM25,BoW,NaiveBayes'\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--dictionaries', dictionaries,\n '--feature-calcers', feature_estimators,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', 
learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fit_with_per_feature_text_options(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n text_processing = {\n 'tokenizers': [\n {'tokenizer_id': 'Space', 'delimiter': ' '},\n {'tokenizer_id': 'Comma', 'delimiter': ','},\n ],\n 'dictionaries': [\n {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},\n ],\n 'feature_processing': {\n '0': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n '1': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},\n ],\n '2': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n }\n }\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n"
] |
[
[
"numpy.dot",
"numpy.hstack",
"pandas.read_csv",
"numpy.log",
"numpy.allclose",
"numpy.random.seed",
"numpy.random.random",
"numpy.arange",
"numpy.genfromtxt",
"numpy.all",
"numpy.concatenate",
"numpy.random.randn",
"numpy.mean",
"numpy.float32",
"numpy.savetxt",
"numpy.array",
"numpy.random.RandomState",
"numpy.loadtxt"
]
] |
dimdamop/single-neuron
|
[
"fa649bcd2c7cc68b46c87e63e3c5869f772fecdf"
] |
[
"tests/test_neuron.py"
] |
[
"# Author: Dimitrios Damopoulos\n# MIT license (see LICENCE.txt in the top-level folder)\n\nimport unittest \n\nimport os\nfrom tempfile import mkstemp \nfrom os.path import exists\n\nimport numpy as np\nfrom numpy import random\nimport string\nfrom random import choice as std_choice\n\nfrom single_neuron import neuron as neuron\n\ncsv_files_to_check_n = 100\nmax_column_header_len = 30\nmax_features_n = 100\nmax_samples_n = 100000\n\nclass CsvParsingTestCase(unittest.TestCase):\n \n @classmethod\n def synthetic_csv(cls, fd_out, N, m, sep, name_delim='', comment_char='#'):\n \"\"\"\n Creates a synthetic CSV character stream with column headers, values and \n extra lines which are either empty or they are comments.\n\n Args:\n fd_out (output stream): Where to write the CSV. Typically, that is a\n file descriptor.\n N (int): The number of samples\n m (int): The number of features per sample\n sep (str): The character that separates the values in the CSV file\n name_delim (str): An optional character to add before and after \n every header column name.\n comment_char (str): When the first non-whitespace character of a \n line is the character `comment_char', then that line should be \n treated as a comment.\n\n Returns:\n A list of the headers (without the `name_delim')\n A np.ndarray of the values in the CSV.\n \"\"\"\n\n charset = string.ascii_letters + string.punctuation + ' \\t'\n charset = charset.replace(sep, '')\n charset = charset.replace(comment_char, '')\n\n if len(name_delim) > 0:\n charset = charset.replace(name_delim, '')\n\n headers = []\n\n while len(headers) < m:\n header_len = random.randint(1, max_column_header_len + 1)\n header = ''.join(std_choice(charset) for _ in range(header_len))\n headers.append(header.strip())\n\n values = 2000 * (random.rand(N, m) - 0.5)\n \n val_line_idx = 0\n is_header_written = False\n while val_line_idx < N: \n\n # insert some comments\n if random.rand() < 0.1:\n line = comment_char + \\\n ''.join(std_choice(charset) for _ in range(100))\n # insert some black lines \n elif random.rand() < 0.1:\n line = ''\n elif random.rand() < 0.1:\n line = ' '\n elif not(is_header_written):\n line = sep.join([name_delim + header + name_delim \n for header in headers])\n is_header_written = True\n else:\n line = sep.join([str(element)\n for element in values[val_line_idx]])\n val_line_idx += 1\n\n fd_out.write(line + '\\n')\n\n return values, headers\n\n def test_parse_csv(self):\n \n candidate_characters = string.ascii_letters + string.punctuation\n\n candidate_characters = candidate_characters.replace('.', '')\n candidate_characters = candidate_characters.replace('-', '')\n\n for _ in range(0, csv_files_to_check_n):\n\n N = random.randint(1, max_samples_n + 1)\n m = random.randint(1, max_features_n + 1)\n\n N = 10\n m = 3\n\n n_sep = ';'\n v_sep = ';'\n c_sep = '#'\n\n while c_sep == n_sep or c_sep == v_sep or n_sep == v_sep:\n n_sep = std_choice(candidate_characters)\n v_sep = std_choice(candidate_characters)\n \n if random.rand() < 0.5:\n n_sep = ''\n\n _, csv_fn = mkstemp()\n\n try:\n csv_fd = open(csv_fn, 'w')\n V1, H1 = self.synthetic_csv(csv_fd, N, m, sep=v_sep, \n name_delim=n_sep, comment_char=c_sep)\n csv_fd.close()\n V2, H2 = neuron.parse_csv(csv_fn, sep=v_sep, name_delim=n_sep)\n finally:\n os.remove(csv_fn)\n\n self.assertEqual(H1, H2)\n self.assertTrue((V1 == V2).all())\n"
] |
[
[
"numpy.random.rand",
"numpy.random.randint"
]
] |
chiro2001/zynqs
|
[
"a9340a7d0e1376c991a7c21680e93c31a2acab1e"
] |
[
"zynq-old/test/test_data.py"
] |
[
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndat = [\r\n 16384, 8612, -1337, 2582, 10328, 4163, -8996, -9664, -540, -1274, -12721, -15195, -3892, 2413, -4918, -9055, \r\n 1598, 12103, 7531, -477, 4968, 15313, 11869, -469, -1931, 6696, 5809, -7273, -13070, -4700, 42, -8965, \r\n -15760, -7071, 3652, 290, -7166, -1007, 11851, 11952, 2047, 1859, 12330, 13864, 1745, -5174, 1799, 5871, \r\n -4551, -14548, -9241, -337, -4812, -14199, -9902, 3105, 4990, -3500, -2774, 9864, 14974, 5739, 37, 8075, \r\n 13985, 4576, -6634, -3480, 4066, -1714, -13939, -13211, -2360, -1197, -10754, -11522, 1144, 8342, 1350, -3034, \r\n 6811, 16020, 9788, -93, 3445, 12024, 7121, -6232, -8171, 562, 353, -11540, -15751, -5543, 1114, -6077, \r\n -11327, -1506, 9829, 6503, -1499, 3586, 14942, 13251, 1432, -611, 8234, 8526, -4337, -11432, -4051, 976, \r\n -8035, -16295, -9087, 1702, -1077, -9118, -3949, 9374, 10990, 1670, 1084, 12048, 15282, 4141, -3318, 3277, \r\n 8192, -1671, -12752, -8898, -145, -4325, -14707, -12064, 596, 3291, -5154, -5332, 7346, 13976, 5891, -10, \r\n 8035, 15329, 7247, -4246, -1933, 5923, 868, -12054, -13017, -2857, -1319, -11310, -13638, -1747, 6240, -100, \r\n -5057, 4471, 14956, 10299, 614, 3817, 13270, 9831, -3415, -6433, 1982, 2426, -9715, -15586, -6591, 285, \r\n -6811, -13271, -4546, 7337, 5125, -2927, 1652, 13862, 13945, 2817, 316, 9437, 11071, -1276, -9435, -2970, \r\n 2399, -6469, -16106, -10495, 163, -2140, -10849, -6894, 6596, 9560, 800, -250, 11081, 16016, 6045, -1757, \r\n 4551, 10434, 1397, -10504, -8015, 585, -3227, -14524, -13632, -1558, 1771, -6716, -7970, 4466, 12417, 5465, \r\n -624, 7359, 16028, 9456, -2067, -450, 7814, 3697, -9654, -12196, -2766, -863, -11238, -15201, -4335, 4203, \r\n -1598, -7250, 1722, 13266, 10161, 746, 3619, 13939, 12125, -730, -4626, 3560, 4809, -7338, -14732, -7007, \r\n 0, -7007, -14732, -7338, 4809, 3560, -4626, -730, 12125, 13939, 3619, 746, 10161, 13266, 1722, -7250, \r\n -1598, 4203, -4335, -15201, -11238, -863, -2766, -12196, -9654, 3697, 7814, -450, -2067, 9456, 16028, 7359, \r\n -624, 5465, 12417, 4466, -7970, -6716, 1771, -1558, -13632, -14524, -3227, 585, -8015, -10504, 1397, 10434, \r\n 4551, -1757, 6045, 16016, 11081, -250, 800, 9560, 6596, -6894, -10849, -2140, 163, -10495, -16106, -6469, \r\n 2399, -2970, -9435, -1276, 11071, 9437, 316, 2817, 13945, 13862, 1652, -2927, 5125, 7337, -4546, -13271, \r\n -6811, 285, -6591, -15586, -9715, 2426, 1982, -6433, -3415, 9831, 13270, 3817, 614, 10299, 14956, 4471, \r\n -5057, -100, 6240, -1747, -13638, -11310, -1319, -2857, -13017, -12054, 868, 5923, -1933, -4246, 7247, 15329, \r\n 8035, -10, 5891, 13976, 7346, -5332, -5154, 3291, 596, -12064, -14707, -4325, -145, -8898, -12752, -1671, \r\n 8192, 3277, -3318, 4141, 15282, 12048, 1084, 1670, 10990, 9374, -3949, -9118, -1077, 1702, -9087, -16295, \r\n -8035, 976, -4051, -11432, -4337, 8526, 8234, -611, 1432, 13251, 14942, 3586, -1499, 6503, 9829, -1506, \r\n -11327, -6077, 1114, -5543, -15751, -11540, 353, 562, -8171, -6232, 7121, 12024, 3445, -93, 9788, 16020, \r\n 6811, -3034, 1350, 8342, 1144, -11522, -10754, -1197, -2360, -13211, -13939, -1714, 4066, -3480, -6634, 4576, \r\n 13985, 8075, 37, 5739, 14974, 9864, -2774, -3500, 4990, 3105, -9902, -14199, -4812, -337, -9241, -14548, \r\n -4551, 5871, 1799, -5174, 1745, 13864, 12330, 1859, 2047, 11952, 11851, -1007, -7166, 290, 3652, -7071, \r\n -15760, -8965, 42, -4700, -13070, -7273, 5809, 6696, -1931, -469, 11869, 15313, 4968, -477, 7531, 12103, \r\n 1598, -9055, -4918, 2413, -3892, 
-15195, -12721, -1274, -540, -9664, -8996, 4163, 10328, 2582, -1337, 8612, \r\n 16384, 8612, -1337, 2582, 10328, 4163, -8996, -9664, -540, -1274, -12721, -15195, -3892, 2413, -4918, -9055, \r\n 1598, 12103, 7531, -477, 4968, 15313, 11869, -469, -1931, 6696, 5809, -7273, -13070, -4700, 42, -8965, \r\n -15760, -7071, 3652, 290, -7166, -1007, 11851, 11952, 2047, 1859, 12330, 13864, 1745, -5174, 1799, 5871, \r\n -4551, -14548, -9241, -337, -4812, -14199, -9902, 3105, 4990, -3500, -2774, 9864, 14974, 5739, 37, 8075, \r\n 13985, 4576, -6634, -3480, 4066, -1714, -13939, -13211, -2360, -1197, -10754, -11522, 1144, 8342, 1350, -3034, \r\n 6811, 16020, 9788, -93, 3445, 12024, 7121, -6232, -8171, 562, 353, -11540, -15751, -5543, 1114, -6077, \r\n -11327, -1506, 9829, 6503, -1499, 3586, 14942, 13251, 1432, -611, 8234, 8526, -4337, -11432, -4051, 976, \r\n -8035, -16295, -9087, 1702, -1077, -9118, -3949, 9374, 10990, 1670, 1084, 12048, 15282, 4141, -3318, 3277, \r\n 8192, -1671, -12752, -8898, -145, -4325, -14707, -12064, 596, 3291, -5154, -5332, 7346, 13976, 5891, -10, \r\n 8035, 15329, 7247, -4246, -1933, 5923, 868, -12054, -13017, -2857, -1319, -11310, -13638, -1747, 6240, -100, \r\n -5057, 4471, 14956, 10299, 614, 3817, 13270, 9831, -3415, -6433, 1982, 2426, -9715, -15586, -6591, 285, \r\n -6811, -13271, -4546, 7337, 5125, -2927, 1652, 13862, 13945, 2817, 316, 9437, 11071, -1276, -9435, -2970, \r\n 2399, -6469, -16106, -10495, 163, -2140, -10849, -6894, 6596, 9560, 800, -250, 11081, 16016, 6045, -1757, \r\n 4551, 10434, 1397, -10504, -8015, 585, -3227, -14524, -13632, -1558, 1771, -6716, -7970, 4466, 12417, 5465, \r\n -624, 7359, 16028, 9456, -2067, -450, 7814, 3697, -9654, -12196, -2766, -863, -11238, -15201, -4335, 4203, \r\n -1598, -7250, 1722, 13266, 10161, 746, 3619, 13939, 12125, -730, -4626, 3560, 4809, -7338, -14732, -7007, \r\n 0, -7007, -14732, -7338, 4809, 3560, -4626, -730, 12125, 13939, 3619, 746, 10161, 13266, 1722, -7250, \r\n -1598, 4203, -4335, -15201, -11238, -863, -2766, -12196, -9654, 3697, 7814, -450, -2067, 9456, 16028, 7359, \r\n -624, 5465, 12417, 4466, -7970, -6716, 1771, -1558, -13632, -14524, -3227, 585, -8015, -10504, 1397, 10434, \r\n 4551, -1757, 6045, 16016, 11081, -250, 800, 9560, 6596, -6894, -10849, -2140, 163, -10495, -16106, -6469, \r\n 2399, -2970, -9435, -1276, 11071, 9437, 316, 2817, 13945, 13862, 1652, -2927, 5125, 7337, -4546, -13271, \r\n -6811, 285, -6591, -15586, -9715, 2426, 1982, -6433, -3415, 9831, 13270, 3817, 614, 10299, 14956, 4471, \r\n -5057, -100, 6240, -1747, -13638, -11310, -1319, -2857, -13017, -12054, 868, 5923, -1933, -4246, 7247, 15329, \r\n 8035, -10, 5891, 13976, 7346, -5332, -5154, 3291, 596, -12064, -14707, -4325, -145, -8898, -12752, -1671, \r\n 8192, 3277, -3318, 4141, 15282, 12048, 1084, 1670, 10990, 9374, -3949, -9118, -1077, 1702, -9087, -16295, \r\n -8035, 976, -4051, -11432, -4337, 8526, 8234, -611, 1432, 13251, 14942, 3586, -1499, 6503, 9829, -1506, \r\n -11327, -6077, 1114, -5543, -15751, -11540, 353, 562, -8171, -6232, 7121, 12024, 3445, -93, 9788, 16020, \r\n 6811, -3034, 1350, 8342, 1144, -11522, -10754, -1197, -2360, -13211, -13939, -1714, 4066, -3480, -6634, 4576, \r\n 13985, 8075, 37, 5739, 14974, 9864, -2774, -3500, 4990, 3105, -9902, -14199, -4812, -337, -9241, -14548, \r\n -4551, 5871, 1799, -5174, 1745, 13864, 12330, 1859, 2047, 11952, 11851, -1007, -7166, 290, 3652, -7071, \r\n -15760, -8965, 42, -4700, -13070, -7273, 5809, 6696, -1931, -469, 11869, 15313, 4968, -477, 7531, 12103, \r\n 1598, -9055, -4918, 2413, 
-3892, -15195, -12721, -1274, -540, -9664, -8996, 4163, 10328, 2582, -1337, -1337\r\n]\r\n\r\nx = np.array([i for i in range(len(dat))])\r\ndata = np.array(dat)\r\n\r\nplt.plot(x, data, ls=\"-\", lw=2, label=\"test data\")\r\nplt.legend()\r\nplt.show()\r\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
NAM-IL/DeepLearningResearch
|
[
"b4f35520df7ef7980132758c407356fca20c59f2"
] |
[
"ToDO/mnist_loader.py"
] |
[
"\"\"\"\r\nmnist_loader\r\n~~~~~~~~~~~~\r\n\r\nA library to load the MNIST image data. For details of the data\r\nstructures that are returned, see the doc strings for ``load_data``\r\nand ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the\r\nfunction usually called by our neural network code.\r\n\"\"\"\r\n\r\n#### Libraries\r\n# Standard library\r\n# import cPickle # python 2.x\r\nimport pickle\r\nimport gzip\r\n\r\n# Third-party libraries\r\nimport numpy as np\r\n\r\ndef load_data():\r\n \"\"\"Return the MNIST data as a tuple containing the training data,\r\n the validation data, and the test data.\r\n\r\n The ``training_data`` is returned as a tuple with two entries.\r\n The first entry contains the actual training images. This is a\r\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\r\n numpy ndarray with 784 values, representing the 28 * 28 = 784\r\n pixels in a single MNIST image.\r\n\r\n The second entry in the ``training_data`` tuple is a numpy ndarray\r\n containing 50,000 entries. Those entries are just the digit\r\n values (0...9) for the corresponding images contained in the first\r\n entry of the tuple.\r\n\r\n The ``validation_data`` and ``test_data`` are similar, except\r\n each contains only 10,000 images.\r\n\r\n This is a nice data format, but for use in neural networks it's\r\n helpful to modify the format of the ``training_data`` a little.\r\n That's done in the wrapper function ``load_data_wrapper()``, see\r\n below.\r\n \"\"\"\r\n f = gzip.open('./data/mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\r\n f.close()\r\n return (training_data, validation_data, test_data)\r\n\r\ndef load_data_wrapper():\r\n \"\"\"Return a tuple containing ``(training_data, validation_data,\r\n test_data)``. Based on ``load_data``, but the format is more\r\n convenient for use in our implementation of neural networks.\r\n\r\n In particular, ``training_data`` is a list containing 50,000\r\n 2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray\r\n containing the input image. ``y`` is a 10-dimensional\r\n numpy.ndarray representing the unit vector corresponding to the\r\n correct digit for ``x``.\r\n\r\n ``validation_data`` and ``test_data`` are lists containing 10,000\r\n 2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional\r\n numpy.ndarry containing the input image, and ``y`` is the\r\n corresponding classification, i.e., the digit values (integers)\r\n corresponding to ``x``.\r\n\r\n Obviously, this means we're using slightly different formats for\r\n the training data and the validation / test data. These formats\r\n turn out to be the most convenient for use in our neural network\r\n code.\"\"\"\r\n tr_d, va_d, te_d = load_data()\r\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\r\n training_results = [vectorized_result(y) for y in tr_d[1]]\r\n training_data = list(zip(training_inputs, training_results))\r\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\r\n validation_data = list(zip(validation_inputs, va_d[1]))\r\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\r\n test_data = list(zip(test_inputs, te_d[1]))\r\n return (training_data, validation_data, test_data)\r\n\r\ndef vectorized_result(j):\r\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the jth\r\n position and zeroes elsewhere. 
This is used to convert a digit\r\n (0...9) into a corresponding desired output from the neural\r\n network.\"\"\"\r\n e = np.zeros((10, 1))\r\n e[j] = 1.0\r\n return e\r\n"
] |
[
[
"numpy.reshape",
"numpy.zeros"
]
] |
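The `mnist_loader.py` record above reshapes every image into a `(784, 1)` column vector and one-hot encodes its label with `vectorized_result` before zipping them into `(x, y)` training pairs. A minimal, self-contained sketch of that transformation, using synthetic arrays in place of the `mnist.pkl.gz` file (assumed not to be available here):

```python
import numpy as np

def vectorized_result(j):
    # 10-dimensional unit vector with 1.0 at index j, as in the record above
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

# Synthetic stand-in for tr_d = (images, labels) returned by load_data()
rng = np.random.default_rng(0)
images = rng.random((5, 784)).astype(np.float32)   # five fake flattened 28x28 images
labels = rng.integers(0, 10, size=5)

training_inputs = [np.reshape(x, (784, 1)) for x in images]
training_results = [vectorized_result(int(y)) for y in labels]
training_data = list(zip(training_inputs, training_results))

x0, y0 = training_data[0]
print(x0.shape, y0.shape, int(labels[0]), int(np.argmax(y0)))   # (784, 1) (10, 1) digit digit
```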
kanekomasahiro/context-debias
|
[
"2161b38efcb03dd894690f54805686ed8b75939a"
] |
[
"src/run_debias_mlm.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nimport copy\nfrom typing import Dict, List, Tuple\n\nimport nltk\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom multiprocessing import Pool\nimport multiprocessing as multi\n\nfrom transformers import (\n MODEL_WITH_LM_HEAD_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelWithLMHead,\n AutoTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n get_linear_schedule_with_warmup,\n)\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n\n block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)\n\n directory, filename = os.path.split(file_path)\n cached_features_file = os.path.join(\n directory, args.model_type + \"_cached_lm_\" + str(block_size) + \"_\" + filename\n )\n\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n with open(cached_features_file, \"rb\") as handle:\n self.examples = pickle.load(handle)\n else:\n logger.info(\"Creating features from dataset file at %s\", directory)\n\n self.examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n text = f.read()\n\n tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))\n\n for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size\n self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]))\n # Note that we are loosing the last truncated example here for the sake of simplicity (no padding)\n # If your dataset is small, first you should loook for a bigger one :-) and second you\n # can change this behavior by adding (model specific) padding.\n\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n with open(cached_features_file, \"wb\") as handle:\n 
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, item):\n return torch.tensor(self.examples[item], dtype=torch.long)\n\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, examples: list, labels: list):\n self.examples = examples\n self.labels = labels\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n if self.labels:\n return torch.tensor(self.examples[i], dtype=torch.long), torch.tensor(self.labels[i], dtype=torch.long)\n else:\n return torch.tensor(self.examples[i], dtype=torch.long)\n\n\ndef create_dataset(data, dataset):\n d = dict()\n for key in data['example'].keys():\n if key not in data['label']:\n d[key] = dataset(data['example'][key], None)\n else:\n d[key] = dataset(data['example'][key], data['label'][key])\n\n return d\n\n\ndef load_and_cache_examples(data, args, tokenizer):\n if args.line_by_line:\n train_dataset = create_dataset(data['train'], LineByLineTextDataset)\n dev_dataset = create_dataset(data['dev'], LineByLineTextDataset)\n return {'train': train_dataset, 'dev': dev_dataset}\n else:\n return TextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n\ndef split_data(attributes_examples, attributes_labels, neutral_examples, neutral_labels, args):\n data = {'train': {'example': {}, 'label': {}}, 'dev': {'example': {}, 'label': {}}}\n\n for i, (examples, labels) in enumerate(zip(attributes_examples, attributes_labels)):\n idx_l = list(range(len(examples)))\n random.shuffle(idx_l)\n examples = [examples[idx] for idx in idx_l]\n labels = [labels[idx] for idx in idx_l]\n data['train']['example'][f'attribute{i}'] = examples[args.dev_data_size:]\n data['train']['label'][f'attribute{i}'] = labels[args.dev_data_size:]\n data['dev']['example'][f'attribute{i}'] = examples[:args.dev_data_size]\n data['dev']['label'][f'attribute{i}'] = labels[:args.dev_data_size]\n\n idx_l = list(range(len(neutral_examples)))\n random.shuffle(idx_l)\n neutral_examples = [neutral_examples[idx] for idx in idx_l]\n data['train']['example']['neutral'] = neutral_examples[args.dev_data_size:]\n data['dev']['example']['neutral'] = neutral_examples[:args.dev_data_size]\n if neutral_labels is not None:\n neutral_labels = [neutral_labels[idx] for idx in idx_l]\n data['train']['label']['neutral'] = neutral_labels[args.dev_data_size:]\n data['dev']['label']['neutral'] = neutral_labels[:args.dev_data_size]\n\n return data\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef create_dataloader(args, datasets, tokenizer, train=False):\n def collate(batch: List[torch.Tensor]):\n if type(batch[0]) == tuple:\n examples, labels = list(zip(*batch))\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id), \\\n pad_sequence(labels, batch_first=True, padding_value=0)\n else:\n return pad_sequence(batch, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n dataloaders = {}\n example_num = 0\n data_distribution = []\n\n max_size = max([len(value) for key, value in datasets.items() if key != 'neutral'])\n min_size = min([len(value) for key, value in datasets.items() if key != 'neutral'])\n\n for key, dataset in datasets.items():\n example_num += len(dataset)\n if train:\n dataloaders[key] = iter(DataLoader(dataset, batch_size=args.train_batch_size, collate_fn=collate, shuffle=True))\n data_distribution += 
[key for _ in range(int(min_size / args.train_batch_size))]\n else:\n dataloaders[key] = iter(DataLoader(dataset, batch_size=args.eval_batch_size, collate_fn=collate , shuffle=False))\n data_distribution += [key for _ in range(int(min_size / args.eval_batch_size))]\n\n return dataloaders, example_num, data_distribution\n\n\ndef train(args, data, datasets, model: PreTrainedModel, original_model, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]:\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n train_datasets = datasets['train']\n dev_datasets = datasets['dev']\n\n train_dataloaders, train_example_num, train_distribution = create_dataloader(args, train_datasets, tokenizer, train=True)\n dev_dataloaders, dev_example_num, dev_distribution = create_dataloader(args, dev_datasets, tokenizer, train=False)\n\n train_iter_num = sum([len(dataloader) for dataloader in train_dataloaders.values()])\n dev_iter_num = sum([len(dataloader) for dataloader in dev_dataloaders.values()])\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (train_iter_num // args.gradient_accumulation_steps) + 1\n else:\n t_total = train_iter_num // args.gradient_accumulation_steps * args.num_train_epochs\n\n model = model.module if hasattr(model, \"module\") else model # Take care of distributed/parallel training\n model.resize_token_embeddings(len(tokenizer))\n\n original_model = original_model.module if hasattr(original_model, \"module\") else original_model # Take care of distributed/parallel training\n original_model.resize_token_embeddings(len(tokenizer))\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if (\n args.model_name_or_path\n and os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name_or_path, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n original_model = torch.nn.DataParallel(original_model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, 
device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)\n original_model = torch.nn.parallel.DistributedDataParallel(\n original_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", train_example_num)\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n best_loss = float('inf')\n best_step = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if args.model_name_or_path and os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (train_iter_num // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (train_iter_num // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n\n model.zero_grad()\n original_model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n\n def inner_product(x, y):\n return torch.mean(torch.sum(y * x, 3))\n\n def mean_square(x, y, idx):\n return torch.mean(torch.mean((y - x) ** 2, idx))\n #return torch.mean(torch.sum((y - x) ** 2, 3))\n\n def save_best_model(best_loss, best_step, dev_dataloaders):\n if (args.local_rank == -1 and args.evaluate_during_training): # Only evaluate when single GPU otherwise metrics may not average well\n eval_loss = evaluate(model, attributes_hiddens, dev_dataloaders)\n #eval_loss = evaluate(args, model, original_model, dev_dataloaders, dev_example_num, dev_distribution, criterion_mse, criterion_ip, feminine_hiddens, masculine_hiddens, gender_hiddens)\n logger.info(\" global_step = %s, evaluate loss = %s\", global_step, eval_loss)\n tb_writer.add_scalar(\"eval_loss\", eval_loss, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n\n if eval_loss < best_loss:\n best_loss = eval_loss\n best_step = global_step\n checkpoint_prefix = \"checkpoint\"\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-best\")\n os.makedirs(output_dir, exist_ok=True)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to 
%s\", output_dir)\n\n #_rotate_checkpoints(args, checkpoint_prefix)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n logger.info(\" best_step = %s, best loss = %s\", best_step, best_loss)\n\n return best_loss, best_step\n\n def get_hiddens_of_model(input):\n model.zero_grad()\n if args.model_type == 'roberta':\n _, _, hiddens = model.roberta(input)\n elif args.model_type == 'bert':\n _, _, hiddens = model.bert(input)\n elif args.model_type == 'albert':\n _, _, hiddens = model.albert(input)\n elif args.model_type == 'dbert':\n _, hiddens = model.distilbert(input)\n elif args.model_type == 'electra':\n _, hiddens = model.electra(input)\n elif args.model_type == 'gpt2':\n _, _, hiddens = model.transformer(input)\n elif args.model_type == 'gpt':\n _, hiddens = model.transformer(input)\n\n return hiddens\n\n def attribute_vector_example():\n attributes_hiddens = {f'attribute{i}': [] for i in range(2)}\n\n dataloaders, _, distribution = create_dataloader(args, train_datasets, tokenizer, train=True)\n for key in distribution:\n if key != 'neutral':\n inputs, labels = next(dataloaders[key])\n inputs = inputs.to(args.device)\n hiddens = get_hiddens_of_model(inputs)\n hiddens = torch.stack(hiddens, 2)\n if labels.size(1) > 1:\n onehot = torch.eye(hiddens.size(1))\n zeros = torch.zeros(1, onehot.size(0))\n onehot = torch.cat((zeros, onehot), 0)\n onehot = onehot[labels]\n onehot = torch.sum(onehot, 1)\n onehot = onehot.view(hiddens.size(0), -1, 1, 1)\n else:\n onehot = torch.eye(hiddens.size(1))[labels].view(hiddens.size(0), -1, 1, 1)\n onehot = onehot.to(args.device)\n attributes_hiddens[key].append(torch.sum(hiddens * onehot, 1) / labels.size(1))\n\n # neutralも含まれている\n attribute_size = len(data['train']['example'])\n for i in range(attribute_size - 1):\n attributes_hiddens[f'attribute{i}'] = torch.mean(torch.cat(attributes_hiddens[f'attribute{i}'], 0), 0).detach().unsqueeze(0)\n\n return attributes_hiddens\n\n def forward(attributes_hiddens, dataloaders, key):\n inputs = next(dataloaders[key])\n if len(inputs) == 2:\n inputs, labels = inputs\n labels = labels.to(args.device)\n else:\n labels = None\n inputs = inputs.to(args.device)\n if args.model_type == 'roberta':\n final_layer_hiddens, first_token_hidden, all_layer_hiddens = model.roberta(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, _, all_layer_original_hiddens = original_model.roberta(inputs)\n if args.token_loss:\n token_predicts = model.lm_head(final_layer_hiddens)\n token_original = original_model.lm_head(final_layer_original_hiddens)\n elif args.model_type == 'bert':\n final_layer_hiddens, first_token_hidden, all_layer_hiddens = model.bert(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, _, all_layer_original_hiddens = original_model.bert(inputs)\n if args.token_loss:\n token_predicts = model.cls(final_layer_hiddens)\n token_original = original_model.cls(final_layer_original_hiddens)\n elif args.model_type == 'albert':\n final_layer_hiddens, first_token_hidden, all_layer_hiddens = model.albert(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, _, all_layer_original_hiddens = original_model.albert(inputs)\n if args.token_loss:\n token_predicts = model.classifier(final_layer_hiddens)\n token_original = 
original_model.classifier(final_layer_original_hiddens)\n elif args.model_type == 'dbert':\n final_layer_hiddens, all_layer_hiddens = model.distilbert(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, all_layer_original_hiddens = original_model.distilbert(inputs)\n if args.token_loss:\n token_predicts = model.classifier(final_layer_hiddens)\n token_original = original_model.classifier(final_layer_original_hiddens)\n elif args.model_type == 'electra':\n final_layer_hiddens, all_layer_hiddens = model.electra(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, all_layer_original_hiddens = original_model.electra(inputs)\n if args.token_loss:\n hiddens = model.generator_predictions(final_layer_hiddens)\n token_predicts = model.generator_lm_head(hiddens)\n original_hiddens = original_model.generator_predictions(final_layer_original_hiddens)\n token_original = original_model.generator_lm_head(original_hiddens)\n elif args.model_type == 'gpt2':\n final_layer_hiddens, first_token_hidden, all_layer_hiddens = model.transformer(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, _, all_layer_original_hiddens = original_model.transformer(inputs)\n if args.token_loss:\n token_predicts = model.lm_head(final_layer_hiddens)\n token_original = original_model.lm_head(final_layer_original_hiddens)\n elif args.model_type == 'gpt':\n final_layer_hiddens, all_layer_hiddens = model.transformer(inputs)\n if 'neutral' != key:\n with torch.no_grad():\n final_layer_original_hiddens, all_layer_original_hiddens = original_model.transformer(inputs)\n if args.token_loss:\n token_predicts = model.lm_head(final_layer_hiddens)\n token_original = original_model.lm_head(final_layer_original_hiddens)\n\n all_layer_hiddens = torch.stack(all_layer_hiddens, 2)\n if 'neutral' != key:\n all_original_hiddens = torch.stack(all_layer_original_hiddens, 2)\n all_original_hiddens = all_original_hiddens.detach()\n if args.token_loss:\n original_hiddens - original_hiddens.detach()\n token_original = token_original.detach()\n if args.debias_layer == 'all':\n target_layer_hiddens = all_layer_hiddens\n target_original_hiddens = all_layer_hiddens\n else:\n if args.debias_layer == 'first':\n idx = 0\n elif args.debias_layer == 'last':\n idx = -1\n target_layer_hiddens = all_layer_hiddens[:,:,idx]\n target_layer_hiddens = target_layer_hiddens.unsqueeze(2)\n if 'neutral' != key:\n target_original_hiddens = all_original_hiddens[:,:,idx]\n target_original_hiddens = target_original_hiddens.unsqueeze(2)\n else:\n attributes_hiddens = {key: value[:,idx,:].unsqueeze(1) for key, value in attributes_hiddens.items()}\n\n if args.loss_target == 'sentence' or labels is None:\n attributes_hiddens = {key: value.unsqueeze(1) for key, value in attributes_hiddens.items()}\n #elif args.loss_target == 'token' and key == 'neutral':\n elif args.loss_target == 'token':\n if labels.size(1) > 1:\n onehot = torch.eye(target_layer_hiddens.size(1))\n zeros = torch.zeros(1, onehot.size(0))\n onehot = torch.cat((zeros, onehot), 0)\n onehot = onehot[labels]\n onehot = torch.sum(onehot, 1)\n onehot = onehot.view(target_layer_hiddens.size(0), -1, 1, 1)\n else:\n onehot = torch.eye(target_layer_hiddens.size(1))[labels].view(target_layer_hiddens.size(0), -1, 1, 1)\n onehot = onehot.to(args.device)\n target_layer_hiddens = torch.sum(target_layer_hiddens * onehot, 1).unsqueeze(1) / labels.size(1)\n if 'neutral' != key:\n target_original_hiddens = 
torch.sum(target_original_hiddens * onehot, 1).unsqueeze(1) / labels.size(1)\n else:\n attributes_hiddens = {key: value.expand(target_layer_hiddens.size(0),\n 1,\n value.size(1),\n value.size(2))\n for key, value in attributes_hiddens.items()}\n\n if 'neutral' == key:\n loss = 0\n for attribute_hiddens in attributes_hiddens.values():\n tmp_loss = criterion_ip(target_layer_hiddens, attribute_hiddens)\n if args.square_loss:\n tmp_loss = tmp_loss ** 2\n tmp_loss *= alpha\n loss += tmp_loss\n else:\n #loss = criterion_ms(target_layer_hiddens, target_original_hiddens)\n loss = criterion_ms(all_layer_hiddens, all_original_hiddens, 3)\n if args.token_loss:\n loss += criterion_ms(token_predicts, token_original, 2)\n #loss += criterion_ms(hiddens, original_hiddens, 2)\n loss *= beta\n\n return loss\n\n #def evaluate(args, model: PreTrainedModel, original_model, dev_dataloaders, dev_example_num, dev_distribution, criterion_mse, criterion_ip, feminine_hiddens, masculine_hiddens, gender_hiddens, prefix=\"\") -> Dict:\n def evaluate(model, attributes_hiddens, dev_dataloaders, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n if args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir, exist_ok=True)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", dev_example_num)\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n model.eval()\n #criterion.eval()\n\n for key in tqdm(dev_distribution):\n with torch.no_grad():\n loss = forward(attributes_hiddens, dev_dataloaders, key)\n\n eval_loss += loss.item()\n\n model.zero_grad()\n original_model.zero_grad()\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n '''\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n logger.info(\" Loss = %s\", eval_loss)\n writer.write(\"Loss = %s\\n\" % (eval_loss))\n '''\n\n return eval_loss\n\n #criterion_ms = torch.nn.MSELoss()\n criterion_ms = mean_square\n #criterion.train()\n criterion_ip = inner_product\n original_model.eval()\n\n alpha, beta = args.weighted_loss\n alpha = float(alpha)\n beta = float(beta)\n\n train_loss = 0.0\n\n for _ in train_iterator:\n\n random.shuffle(train_distribution)\n epoch_iterator = tqdm(train_distribution, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n\n model.eval()\n with torch.no_grad():\n attributes_hiddens = attribute_vector_example()\n\n for step, key in enumerate(epoch_iterator):\n model.train()\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n loss = forward(attributes_hiddens, train_dataloaders, key)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n train_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n original_model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logger.info(\" global_step = %s, train loss = %s\", global_step, train_loss)\n train_loss = 0.0\n # Log metrics\n best_loss, best_step = save_best_model(best_loss, best_step, dev_dataloaders)\n dev_dataloaders, dev_example_num, dev_distribution = create_dataloader(args, dev_datasets, tokenizer, train=False)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n train_dataloaders, train_example_num, train_distribution = create_dataloader(args, train_datasets, tokenizer, train=True)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n dev_dataloaders, dev_example_num, dev_distribution = create_dataloader(args, dev_datasets, tokenizer, train=False)\n best_loss, best_step = save_best_model(best_loss, best_step, dev_dataloaders)\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_file\", default=None, type=str, required=True, help=\"The input data file.\"\n )\n parser.add_argument(\n \"--output_dir\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--model_type\", type=str, required=True, help=\"The model architecture to be trained or fine-tuned.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--eval_data_file\",\n default=None,\n type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\",\n )\n parser.add_argument(\n \"--line_by_line\",\n action=\"store_true\",\n help=\"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\",\n )\n parser.add_argument(\n \"--should_continue\", action=\"store_true\", help=\"Whether to continue from latest checkpoint in output_dir\"\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n\n parser.add_argument(\n \"--loss_target\", type=str, help=\"\"\n )\n\n parser.add_argument(\n \"--mlm\", action=\"store_true\", help=\"Train with masked-language modeling loss instead of language modeling.\"\n )\n parser.add_argument(\n \"--mlm_probability\", type=float, default=0.15, help=\"Ratio of tokens to mask for masked language modeling loss\"\n )\n\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. 
If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=None,\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\n \"--block_size\",\n default=-1,\n type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\"--weighted_loss\", default=[1.0, 1.0], nargs='*', help=\"\")\n parser.add_argument(\"--debias_layer\", default=all, type=str, choices=['all', 'first', 'last'], help=\"\")\n parser.add_argument(\n \"--num_train_epochs\", default=1.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--square_loss\", action='store_true', help=\"\")\n parser.add_argument(\"--token_loss\", action='store_true', help=\"\")\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--save_total_limit\",\n type=int,\n default=None,\n help=\"Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default\",\n )\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--dev_data_size\", type=int, default=1000, help=\"\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n tp = lambda x:list(x.split(','))\n parser.add_argument('--exclusion_list', type=tp, default=[], help='')\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n '''\n if args.model_type in [\"bert\", \"roberta\", \"distilbert\", \"camembert\"] and not args.mlm:\n raise ValueError(\n \"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm \"\n \"flag (masked language modeling).\"\n )\n '''\n\n if args.should_continue:\n sorted_checkpoints = _sorted_checkpoints(args)\n if len(sorted_checkpoints) == 0:\n raise ValueError(\"Used --should_continue but no checkpoint was found in --output_dir.\")\n else:\n args.model_name_or_path = sorted_checkpoints[-1]\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n and not args.should_continue\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Set seed\n set_seed(args)\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab\n\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n # When we release a pip version exposing CONFIG_MAPPING,\n # we can do `config = CONFIG_MAPPING[args.model_type]()`.\n raise ValueError(\n \"You are instantiating a new config instance from scratch. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --config_name\"\n )\n\n config.output_hidden_states = 'true' # 全層の隠れ層を取得する\n #config.output_attentions = 'true' # 全層のアテンションを取得する\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\"\n )\n\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n args.block_size = min(args.block_size, tokenizer.max_len)\n\n if args.model_name_or_path:\n model = AutoModelWithLMHead.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n original_model = AutoModelWithLMHead.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelWithLMHead.from_config(config)\n original_model = AutoModelWithLMHead.from_config(config)\n\n # GPT-2 and GPT do not have pad.\n if tokenizer._pad_token is None:\n tokenizer.add_special_tokens({'pad_token': '<pad>'})\n model.resize_token_embeddings(len(tokenizer))\n original_model.resize_token_embeddings(len(tokenizer))\n\n model.to(args.device)\n original_model.to(args.device)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n data = torch.load(args.data_file)\n\n attributes_examples = data['attributes_examples']\n attributes_labels = data['attributes_labels']\n neutral_examples = data['neutral_examples']\n\n if 'neutral_labels' in data:\n neutral_labels = data['neutral_labels']\n splited_data = split_data(attributes_examples, attributes_labels, neutral_examples, neutral_labels, args)\n else:\n splited_data = split_data(attributes_examples, attributes_labels, neutral_examples, None, args)\n\n datasets = load_and_cache_examples(splited_data, args, tokenizer)\n\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n train(args, splited_data, datasets, model, original_model, tokenizer)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.mean",
"torch.load",
"torch.cat",
"torch.nn.utils.rnn.pad_sequence",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.init_process_group",
"torch.distributed.barrier",
"torch.tensor",
"torch.stack",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.nn.DataParallel"
]
] |
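The debiasing loss in the `run_debias_mlm.py` record combines two small reductions defined inline: `inner_product` between neutral hidden states and a precomputed attribute vector, and `mean_square` against the frozen original model. A sketch on random tensors (the `batch x tokens x layers x hidden` shapes below are illustrative assumptions, not values taken from the record):

```python
import torch

def inner_product(x, y):
    # mean of the per-position dot product over the hidden dimension, as in the record
    return torch.mean(torch.sum(y * x, 3))

def mean_square(x, y, idx):
    # squared difference reduced over dimension idx, then averaged, as in the record
    return torch.mean(torch.mean((y - x) ** 2, idx))

hiddens = torch.randn(2, 4, 13, 8)        # debiased model, all layers stacked on dim 2
original = torch.randn(2, 4, 13, 8)       # frozen original model
attribute = torch.randn(1, 1, 13, 8)      # attribute vector, broadcast over batch/tokens

bias_term = inner_product(hiddens, attribute)    # driven toward zero on neutral sentences
regularizer = mean_square(hiddens, original, 3)  # keeps the debiased model close to the original
loss = bias_term ** 2 + regularizer              # the record additionally applies alpha/beta weights
print(loss.item())
```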
renyigan-lkgan/RenyiGAN
|
[
"c4bbb86e6037e0b95ee4c00c18729a8a8a3d2946"
] |
[
"renyigan_static_alpha.py"
] |
[
"import os\nimport time\nimport tensorflow as tf\nfrom tensorflow.keras.initializers import RandomNormal\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, BatchNormalization, \\\n LeakyReLU, Conv2DTranspose, Conv2D, Dropout, Flatten, Reshape\nimport utils\nimport numpy as np\n\nalpha_num, trial_number, version_num, seed_num = input(\"Alpha, trial number, version, seed: \").split()\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nnp.random.seed(int(seed_num))\ntf.random.set_random_seed(int(seed_num))\n\n\nclass GAN(object):\n def __init__(self, alpha, trial_num, version):\n self.batch_size = 100\n self.n_classes = 10\n self.buffer_size = 50000\n self.training = True\n self.alpha = alpha\n self.version = version\n self.trial_num = trial_num\n self.noise_dim = 28 * 28\n self.dropout_constant = 0.6\n self.epsilon = 1e-8 # To ensure the log doesn't blow up to -infinity\n self.predictions = []\n self._make_directory('data')\n self._make_directory('data/renyigan' + str(self.alpha))\n self._make_directory('data/renyigan' + str(self.alpha) + '/v' + str(self.version))\n self._make_directory('data/renyigan' + str(self.alpha) + '/v' + str(self.version) + '/trial' + str(self.trial_num))\n\n @staticmethod\n def _make_directory(PATH):\n if not os.path.exists(PATH):\n os.mkdir(PATH)\n\n def get_data(self):\n with tf.name_scope('data'):\n train_data, test_data = utils.get_mnist_dataset(self.batch_size)\n self.iterator = tf.data.Iterator.from_structure(train_data.output_types,\n train_data.output_shapes)\n img, _ = self.iterator.get_next()\n self.img = tf.reshape(img, shape=[-1, 28, 28, 1])\n\n self.train_init = self.iterator.make_initializer(train_data)\n self.test_init = self.iterator.make_initializer(test_data)\n\n def build_generator(self):\n with tf.name_scope('generator') as scope:\n model = Sequential(name=scope)\n model.add(Dense(7 * 7 * 256, use_bias=False, kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01), input_shape=(self.noise_dim,)))\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size\n\n model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01)))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01)))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', activation='tanh', use_bias=False,\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01)))\n assert model.output_shape == (None, 28, 28, 1)\n\n return model\n\n def build_discriminator(self):\n with tf.name_scope('discriminator') as scope:\n model = Sequential(name=scope)\n model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01)))\n model.add(LeakyReLU())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01)))\n model.add(LeakyReLU())\n model.add(Dropout(0.3))\n\n model.add(Flatten())\n model.add(Dense(1, activation='sigmoid', kernel_initializer=\n RandomNormal(mean=0.0, stddev=0.01)))\n\n return model\n\n # Vanilla 
DCGAN discriminator loss function\n def dis_loss_vgan(self):\n with tf.name_scope('disLossVGAN'):\n real_loss = -tf.math.log(self.real_output + self.epsilon)\n real_loss = tf.math.reduce_mean(real_loss)\n\n fake_loss = -tf.math.log(1 - self.fake_output + self.epsilon)\n fake_loss = tf.math.reduce_mean(fake_loss)\n gradients = tf.gradients(-tf.math.log(1 / self.real_output - 1), [self.img])[0]\n r1_penalty = tf.reduce_mean(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))\n dis_loss = real_loss + fake_loss + 5 * r1_penalty\n return dis_loss\n\n # Vanilla DCGAN discriminator loss function with gradient penalty\n def dis_loss_vgan_gp(self):\n with tf.name_scope('disLossVGANGP'):\n real_loss = -tf.math.log(self.real_output + self.epsilon)\n real_loss = tf.math.reduce_mean(real_loss)\n\n fake_loss = -tf.math.log(1 - self.fake_output + self.epsilon)\n fake_loss = tf.math.reduce_mean(fake_loss)\n\n gradients = tf.gradients(-tf.math.log(1 /self.real_output - 1), [self.img])[0]\n r1_penalty = tf.reduce_mean(tf.reduce_sum(tf.square(gradients), axis=[1,2,3]))\n dis_loss = real_loss + fake_loss + 5 * r1_penalty\n\n return dis_loss\n\n # RenyiGAN discriminator loss function\n def dis_loss(self):\n with tf.name_scope('disLoss'):\n real_loss = tf.math.reduce_mean(tf.math.pow(self.real_output, (self.alpha - 1)\n * tf.ones_like(self.real_output)))\n real_loss = 1.0 / (self.alpha - 1) * tf.math.log(real_loss + self.epsilon) + tf.math.log(2.0)\n f = tf.math.reduce_mean(tf.math.pow(1 - self.fake_output,\n (self.alpha - 1) * tf.ones_like(self.fake_output)))\n gen_loss = 1.0 / (self.alpha - 1) * tf.math.log(f + self.epsilon) + tf.math.log(2.0)\n dis_loss = - real_loss - gen_loss\n return dis_loss\n\n # Vanilla DCGAN generator l1 loss function\n def gen_loss_vgan_l1(self):\n with tf.name_scope('genLossVGANL1'):\n fake_loss = tf.math.log(1 - self.fake_output + self.epsilon)\n fake_loss = tf.math.reduce_mean(fake_loss)\n gen_loss = tf.math.abs(fake_loss + tf.math.log(2.0))\n return gen_loss\n\n # Vanilla DCGAN generator loss function\n def gen_loss_vgan(self):\n with tf.name_scope('genLossVGAN'):\n fake_loss = - tf.math.log(self.fake_output + self.epsilon)\n gen_loss = tf.math.reduce_mean(fake_loss)\n return gen_loss\n\n # RenyiGAN generator loss function (has l1 norm incorporated)\n def gen_loss_l1(self):\n with tf.name_scope('genLossL1'):\n f = tf.math.reduce_mean(tf.math.pow(1 - self.fake_output,\n (self.alpha - 1) * tf.ones_like(self.fake_output)))\n gen_loss = tf.math.abs(1.0 / (self.alpha - 1) * tf.math.log(f + self.epsilon) + tf.math.log(2.0))\n return gen_loss\n\n # RenyiGAN generator loss function\n def gen_loss(self):\n print(\"RenyiGAN \" + str(self.alpha))\n with tf.name_scope('genLoss'):\n f = tf.math.reduce_mean(tf.math.pow(1 - self.fake_output,\n (self.alpha - 1) * tf.ones_like(self.fake_output)))\n gen_loss = 1.0 / (self.alpha - 1) * tf.math.log(f + self.epsilon)\n return gen_loss\n\n def optimize(self):\n self.gen_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5, name=\"generator_optimizer\")\n self.gen_opt_minimize = self.gen_opt.minimize(self.gen_loss_value, var_list=self.generator.trainable_variables)\n self.dis_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5, name=\"discriminator_optimizer\")\n self.dis_opt_minimize = self.dis_opt.minimize(self.dis_loss_value,\n var_list=self.discriminator.trainable_variables)\n\n def build(self):\n self.get_data()\n self.generator = self.build_generator()\n self.discriminator = self.build_discriminator()\n self.fake_output_images = 
self.generator(tf.random.normal([self.batch_size, self.noise_dim]))\n self.fake_output = self.discriminator(self.fake_output_images)\n self.real_output = self.discriminator(self.img)\n if self.alpha != 1.0:\n if self.version == 1 or self.version == 3:\n print(\"RenyiGAN no L1 normalization\")\n self.gen_loss_value = self.gen_loss()\n else:\n print(\"RenyiGAN with L1 normalization\")\n self.gen_loss_value = self.gen_loss_l1()\n else:\n if self.version == 1 or self.version == 3:\n print(\"Vanilla GAN No L1 normalization\")\n self.gen_loss_value = self.gen_loss_vgan()\n else:\n print(\"Vanilla GAN with L1 normalization\")\n self.gen_loss_value = self.gen_loss_vgan_l1()\n if self.version == 1 or self.version == 2:\n self.dis_loss_value = self.dis_loss_vgan()\n else:\n self.dis_loss_value = self.dis_loss_vgan_gp()\n self.optimize()\n\n def train_one_epoch(self, sess, init, epoch):\n start_time = time.time()\n sess.run(init)\n self.training = True\n total_loss_gen = 0\n total_loss_dis = 0\n n_batches = 0\n try:\n while True:\n _, disLoss = sess.run([self.dis_opt_minimize, self.dis_loss_value])\n _, genLoss = sess.run([self.gen_opt_minimize, self.gen_loss_value])\n total_loss_gen += genLoss\n total_loss_dis += disLoss\n n_batches += 1\n except tf.errors.OutOfRangeError:\n pass\n self.save_generated_images(sess, epoch)\n print('Average generator loss at epoch {0}: {1}'.format(epoch, total_loss_gen / n_batches))\n print('Average discriminator loss at epoch {0}: {1}'.format(epoch, total_loss_dis / n_batches))\n print('Took: {0} seconds'.format(time.time() - start_time))\n\n def save_generated_images(self, sess, epoch):\n temp = self.generator(tf.random.normal([self.buffer_size, self.noise_dim]))\n temp = sess.run(temp)\n if len(self.predictions) > 0:\n self.predictions.pop(0)\n self.predictions.append(temp)\n self._make_directory('data/renyigan' + str(self.alpha) \n + 'v' + str(self.version) + '/trial' + str(self.trial_num) + '/alpha' + str(self.alpha))\n np.save('data/renyigan' + str(self.alpha) + 'v' + str(self.version) + '/trial' + str(self.trial_num) + '/alpha'\n + str(self.alpha) + '/predictions' + str(epoch), self.predictions)\n\n def train(self, n_epochs):\n self._make_directory('checkpoints')\n self._make_directory('checkpoints/renyigan' + str(self.alpha))\n self._make_directory('checkpoints/renyigan' + str(self.alpha) + '/v' + str(self.version))\n self.cpt_PATH = 'checkpoints/renyigan' + str(self.alpha) + '/v' + str(self.version) + '/trial' + str(self.trial_num)\n if self.trial_num == 1:\n self._make_directory(self.cpt_PATH)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(self.train_init)\n checkpoint = tf.train.Saver(\n {'generator_optimizer': self.gen_opt, 'discriminator_optimizer': self.dis_opt,\n 'generator': self.generator, 'discriminator': self.discriminator, 'iterator': self.iterator},\n max_to_keep=3)\n for epoch in range(n_epochs):\n if self.trial_num == 1:\n if epoch % 10 == 0:\n save_path = checkpoint.save(sess, self.cpt_PATH + str('/ckpt'), global_step=epoch)\n print(\"Saved checkpoint for step {}: {}\".format(int(epoch), save_path))\n print(\"Alpha value: \" + str(self.alpha))\n self.train_one_epoch(sess, self.train_init, epoch)\n\n\nmodel = GAN(round(float(alpha_num), 1), int(trial_number), int(version_num))\nmodel.build()\nmodel.train(n_epochs=250)\n"
] |
[
[
"tensorflow.keras.Sequential",
"tensorflow.train.AdamOptimizer",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.train.Saver",
"tensorflow.keras.layers.Flatten",
"tensorflow.global_variables_initializer",
"tensorflow.keras.layers.Reshape",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.math.reduce_mean",
"tensorflow.math.log",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.data.Iterator.from_structure",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.random.normal"
]
] |
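The `renyigan_static_alpha.py` record implements the Rényi generator objective L_G(α) = 1/(α−1) · log E[(1 − D(G(z)))^(α−1)], optionally shifted by log 2 and wrapped in an absolute value for the L1 variant. A NumPy sketch of that formula on synthetic discriminator outputs (an illustration of the loss only, not the record's TF1 graph code):

```python
import numpy as np

def renyi_gen_loss(fake_output, alpha, eps=1e-8):
    # 1/(alpha - 1) * log E[(1 - D(G(z)))^(alpha - 1)], mirroring gen_loss in the record
    f = np.mean((1.0 - fake_output) ** (alpha - 1.0))
    return np.log(f + eps) / (alpha - 1.0)

rng = np.random.default_rng(0)
fake_output = rng.uniform(0.05, 0.95, size=100)   # synthetic D(G(z)) probabilities

for alpha in (0.5, 3.0):
    print(alpha, renyi_gen_loss(fake_output, alpha))
# As alpha -> 1 this tends to the vanilla term E[log(1 - D(G(z)))] that appears in gen_loss_vgan_l1
```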
AeroXi/CPM-Generate-Pytorch
|
[
"22bd2d7d59aa4907348f4b4b994b537c957e8eb4"
] |
[
"GPT2/model.py"
] |
[
"import math\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nclass MLP(nn.Module):\r\n def __init__(self, embedding_size):\r\n super(MLP, self).__init__()\r\n self.dense_h_to_4h = nn.Linear(embedding_size, embedding_size*4)\r\n self.dense_4h_to_h = nn.Linear(embedding_size*4, embedding_size)\r\n self.act = nn.functional.gelu\r\n\r\n def forward(self, x):\r\n h = self.act(self.dense_h_to_4h(x))\r\n h2 = self.dense_4h_to_h(h)\r\n return h2\r\n\r\nclass Attention(nn.Module):\r\n def __init__(self, \r\n embedding_size, \r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout):\r\n super(Attention, self).__init__()\r\n \r\n self.num_attention_heads = num_attention_heads\r\n self.size_per_head = embedding_size // num_attention_heads\r\n self.embedding_size = embedding_size\r\n\r\n self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)\r\n self.attn_drop = nn.Dropout(attention_dropout)\r\n self.resid_drop = nn.Dropout(residual_dropout)\r\n self.dense = nn.Linear(embedding_size, embedding_size)\r\n\r\n def split_heads(self, x):\r\n x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.size_per_head])\r\n return x.permute(0, 2, 1, 3)\r\n\r\n def forward(self, x, kv_cache=None):\r\n self.seq_len = x.shape[1]\r\n x = self.query_key_value(x)\r\n q, k, v = torch.split(x, split_size_or_sections=self.embedding_size, dim=2)\r\n \r\n q = self.split_heads(q)\r\n k = self.split_heads(k)\r\n v = self.split_heads(v)\r\n \r\n if kv_cache is not None:\r\n pk, pv = kv_cache[0], kv_cache[1]\r\n k = torch.cat([pk, k], dim=-2)\r\n v = torch.cat([pv, v], dim=-2)\r\n\r\n cached_kv = torch.stack([k, v])\r\n\r\n attn = torch.matmul(q, k.transpose(-1, -2)) # [B, N, L, S]\r\n attn = attn / math.sqrt(self.size_per_head)\r\n\r\n # [L, S]\r\n attention_mask = torch.tril(torch.ones(self.seq_len, self.seq_len, dtype=torch.float32, device=x.device))\r\n attention_mask = attention_mask.reshape([1, 1, self.seq_len, self.seq_len])\r\n\r\n # adding to softmax -> its like removing them entirely\r\n attn = attn * attention_mask - 10000.0 * (1.0 - attention_mask)\r\n attn = nn.Softmax(dim=-1)(attn)\r\n attn = self.attn_drop(attn)\r\n\r\n y = torch.matmul(attn, v)\r\n # [B, N, L, S] -> [B, L, N, S]\r\n y = y.permute(0, 2, 1, 3)\r\n y = torch.reshape(y, [-1, self.seq_len, self.embedding_size])\r\n y = self.resid_drop(self.dense(y))\r\n\r\n return y, cached_kv\r\n\r\nclass Block(nn.Module):\r\n def __init__(self, \r\n embedding_size, \r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout):\r\n super(Block, self).__init__()\r\n self.input_layernorm = nn.LayerNorm(embedding_size, eps=1e-5)\r\n self.attention = Attention(embedding_size, num_attention_heads, attention_dropout, residual_dropout)\r\n self.post_attention_layernorm = nn.LayerNorm(embedding_size, eps=1e-5)\r\n self.mlp = MLP(embedding_size)\r\n\r\n def forward(self, x, kv_cache=None):\r\n attn, cached_kv = self.attention(self.input_layernorm(x), kv_cache=kv_cache)\r\n x = x + attn\r\n z = self.post_attention_layernorm(x)\r\n z = self.mlp(z)\r\n x = x + z\r\n return x, cached_kv\r\n\r\nclass Transformer(nn.Module):\r\n def __init__(self, \r\n layer_size,\r\n embedding_size, \r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout):\r\n super(Transformer, self).__init__()\r\n\r\n self.layers = nn.ModuleList([Block(\r\n embedding_size, \r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout) \r\n for _ in range(layer_size)])\r\n\r\n self.final_layernorm = nn.LayerNorm(embedding_size, eps=1e-5)\r\n \r\n 
def forward(self, x, kv_cache=None):\r\n cached_kvs = []\r\n for i, layer in enumerate(self.layers):\r\n x, cached_kv = layer(\r\n x, \r\n kv_cache=kv_cache[i] if kv_cache is not None else None)\r\n cached_kvs.append(cached_kv)\r\n x = self.final_layernorm(x)\r\n return x, torch.stack(cached_kvs)\r\n\r\n\r\n\r\nclass GPT2Model(nn.Module):\r\n def __init__(self,\r\n vocab_size,\r\n layer_size,\r\n block_size,\r\n embedding_dropout,\r\n embedding_size,\r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout):\r\n super(GPT2Model, self).__init__()\r\n \r\n self.word_embeddings = nn.Embedding(vocab_size, embedding_size)\r\n self.position_embeddings = nn.Embedding(block_size, embedding_size)\r\n self.emb_drop = nn.Dropout(embedding_dropout)\r\n self.transformer = Transformer(\r\n layer_size,\r\n embedding_size, \r\n num_attention_heads,\r\n attention_dropout,\r\n residual_dropout)\r\n\r\n def forward(self, x, kv_cache=None, use_cache=False):\r\n if kv_cache is None:\r\n past_length = 0\r\n else:\r\n past_length = kv_cache[0][0].shape[-2]\r\n position_ids = torch.arange(past_length, x.shape[-1] + past_length, dtype=torch.int64, device=x.device)\r\n position_ids = position_ids.unsqueeze(0).expand_as(x)\r\n # print(position_ids)\r\n x = self.word_embeddings(x)\r\n x = self.emb_drop(x + self.position_embeddings(position_ids))\r\n # print(x)\r\n x, cached_kvs = self.transformer(x, kv_cache)\r\n x = torch.matmul(x, self.word_embeddings.weight.transpose(-1, -2))\r\n if use_cache:\r\n return x, cached_kvs\r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n gpt = GPT2Model(\r\n vocab_size=30000,\r\n layer_size=32,\r\n block_size=1024,\r\n embedding_dropout=0.0,\r\n embedding_size=2560,\r\n num_attention_heads=32,\r\n attention_dropout=0.0,\r\n residual_dropout=0.0).half()\r\n gpt.eval()\r\n for x, y in gpt.state_dict().items():\r\n print(x, y.shape)\r\n # out, cached_kvs = gpt(torch.ones(1,1,dtype=torch.int64), torch.randn(2, 2, 1, 32, 1, 80, dtype=torch.float32), use_cache=True)\r\n # print(out.shape, cached_kvs.shape)\r\n"
] |
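The GPT-2 cell above exposes incremental decoding through the `use_cache` / `kv_cache` arguments of `GPT2Model.forward`. Below is a minimal sketch (not part of the dataset row) of how that interface could be driven for greedy generation; the tiny hyperparameters, prompt, and step count are illustrative assumptions, not the configuration from the file's `__main__` block.

```python
import torch

# Assumes GPT2Model is the class defined in the code cell above.
# Small, made-up hyperparameters purely for illustration.
model = GPT2Model(
    vocab_size=100,
    layer_size=2,
    block_size=64,
    embedding_dropout=0.0,
    embedding_size=32,
    num_attention_heads=4,
    attention_dropout=0.0,
    residual_dropout=0.0,
)
model.eval()

# [batch=1, prompt_len=3]; token ids are arbitrary placeholders.
tokens = torch.tensor([[1, 5, 7]], dtype=torch.int64)

with torch.no_grad():
    # Full pass over the prompt, asking the model to return its KV cache.
    logits, kv_cache = model(tokens, use_cache=True)
    for _ in range(5):
        # Greedy pick of the next token from the last position's logits.
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        tokens = torch.cat([tokens, next_token], dim=1)
        # Subsequent steps feed only the new token plus the cached keys/values.
        logits, kv_cache = model(next_token, kv_cache=kv_cache, use_cache=True)

print(tokens)
```

Because `past_length` is read from the cache, position embeddings stay aligned even though each decode step only sees the single newly generated token.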
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.cat",
"torch.reshape",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.matmul",
"torch.split",
"torch.stack"
]
] |
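Several of the listed calls (`torch.nn.Softmax`, `torch.ones`, together with `torch.tril` from the same file) implement the additive causal mask noted in the Attention block's comment ("adding to softmax -> its like removing them entirely"). A small standalone sketch of that trick, with made-up attention scores:

```python
import torch

# Positions disallowed by the lower-triangular mask get a large negative
# offset, so their softmax weight collapses to ~0.
seq_len = 4
scores = torch.randn(1, 1, seq_len, seq_len)  # [batch, heads, L, S]
mask = torch.tril(torch.ones(seq_len, seq_len)).reshape(1, 1, seq_len, seq_len)

masked = scores * mask - 10000.0 * (1.0 - mask)
probs = torch.nn.Softmax(dim=-1)(masked)

# Each query position attends only to itself and earlier positions.
print(probs[0, 0])
```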
yanqiuxia/Doc2EDAG
|
[
"db6256cff47e647f53cdaf3fb43b4adb8559b95b",
"db6256cff47e647f53cdaf3fb43b4adb8559b95b"
] |
[
"dee/dee_model.py",
"dee/dee_task.py"
] |
[
"# -*- coding: utf-8 -*-\n# AUTHOR: Shun Zheng\n# DATE: 19-9-19\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport math\nfrom collections import OrderedDict, namedtuple, defaultdict\nimport random\n\nfrom . import transformer\nfrom .ner_model import NERModel\n\n\nDocSpanInfo = namedtuple(\n 'DocSpanInfo', (\n 'span_token_tup_list', # [(span_token_id, ...), ...], num_spans\n 'span_dranges_list', # [[(sent_idx, char_s, char_e), ...], ...], num_spans\n 'span_mention_range_list', # [(mention_idx_s, mention_idx_e), ...], num_spans\n 'mention_drange_list', # [(sent_idx, char_s, char_e), ...], num_mentions\n 'mention_type_list', # [mention_type_id, ...], num_mentions\n 'event_dag_info', # event_idx -> field_idx -> pre_path -> cur_span_idx_set\n 'missed_sent_idx_list', # index list of sentences where gold spans are not extracted\n )\n)\n\n\ndef get_doc_span_info_list(doc_token_types_list, doc_fea_list, use_gold_span=False):\n assert len(doc_token_types_list) == len(doc_fea_list)\n doc_span_info_list = []\n for doc_token_types, doc_fea in zip(doc_token_types_list, doc_fea_list):\n doc_token_type_mat = doc_token_types.tolist() # [[token_type, ...], ...]\n\n # using extracted results is also ok\n # span_token_tup_list, span_dranges_list = extract_doc_valid_span_info(doc_token_type_mat, doc_fea)\n if use_gold_span:\n span_token_tup_list = doc_fea.span_token_ids_list\n span_dranges_list = doc_fea.span_dranges_list\n else:\n span_token_tup_list, span_dranges_list = extract_doc_valid_span_info(doc_token_type_mat, doc_fea)\n\n if len(span_token_tup_list) == 0:\n # do not get valid entity span results,\n # just use gold spans to avoid crashing at earlier iterations\n # TODO: consider generate random negative spans\n span_token_tup_list = doc_fea.span_token_ids_list\n span_dranges_list = doc_fea.span_dranges_list\n\n # one span may have multiple mentions\n span_mention_range_list, mention_drange_list, mention_type_list = get_span_mention_info(\n span_dranges_list, doc_token_type_mat\n )\n '''\n span_mention_range_list:同一个mention 在mention_drange_list跨度范围,同一个mention在文章中可能出现多次,而且可能有不同的表达式\n mention_drange_list:文章所有mention的 sent_idx span范围\n mention_type_list:文章所有mention的类别\n '''\n '''\n span_token_tup_list: mention 所对应的token_id 长度同span_mention_range_list长度一样\n span_dranges_list:\n event_dag_info:id 为span_token_tup_list的id span_id路径信息 event_idx -> field_idx -> pre_path -> cur_span_idx_set\n '''\n # generate event decoding dag graph for model training\n event_dag_info, _, missed_sent_idx_list = doc_fea.generate_dag_info_for(span_token_tup_list, return_miss=True)\n\n # doc_span_info will incorporate all span-level information needed for the event extraction\n doc_span_info = DocSpanInfo(\n span_token_tup_list, span_dranges_list, span_mention_range_list,\n mention_drange_list, mention_type_list,\n event_dag_info, missed_sent_idx_list,\n )\n\n doc_span_info_list.append(doc_span_info)\n\n return doc_span_info_list\n\n\nclass Doc2EDAGModel(nn.Module):\n \"\"\"Document-level Event Extraction Model\"\"\"\n\n def __init__(self, config, event_type_fields_pairs, ner_model=None):\n super(Doc2EDAGModel, self).__init__()\n # Note that for distributed training, you must ensure that\n # for any batch, all parameters need to be used\n\n self.config = config\n self.event_type_fields_pairs = event_type_fields_pairs\n\n if ner_model is None:\n self.ner_model = NERModel(config)\n else:\n self.ner_model = ner_model\n\n # all event tables\n self.event_tables = nn.ModuleList([\n EventTable(event_type, 
field_types, config.hidden_size)\n for event_type, field_types in self.event_type_fields_pairs\n ])\n\n # sentence position indicator\n self.sent_pos_encoder = SentencePosEncoder(\n config.hidden_size, max_sent_num=config.max_sent_num, dropout=config.dropout\n )\n\n if self.config.use_token_role:\n self.ment_type_encoder = MentionTypeEncoder(\n config.hidden_size, config.num_entity_labels, dropout=config.dropout\n )\n\n # various attentive reducer\n if self.config.seq_reduce_type == 'AWA':\n self.doc_token_reducer = AttentiveReducer(config.hidden_size, dropout=config.dropout)\n self.span_token_reducer = AttentiveReducer(config.hidden_size, dropout=config.dropout)\n self.span_mention_reducer = AttentiveReducer(config.hidden_size, dropout=config.dropout)\n else:\n assert self.config.seq_reduce_type in {'MaxPooling', 'MeanPooling'}\n\n if self.config.use_doc_enc:\n # get doc-level context information for every mention and sentence\n self.doc_context_encoder = transformer.make_transformer_encoder(\n config.num_tf_layers, config.hidden_size, ff_size=config.ff_size, dropout=config.dropout\n )\n\n if self.config.use_path_mem:\n # get field-specific and history-aware information for every span\n self.field_context_encoder = transformer.make_transformer_encoder(\n config.num_tf_layers, config.hidden_size, ff_size=config.ff_size, dropout=config.dropout\n )\n\n def get_doc_span_mention_emb(self, doc_token_emb, doc_span_info):\n if len(doc_span_info.mention_drange_list) == 0:\n doc_mention_emb = None\n else:\n # get mention context embeding\n # doc_mention_emb = torch.cat([\n # # doc_token_emb[sent_idx, char_s:char_e, :].sum(dim=0, keepdim=True)\n # doc_token_emb[sent_idx, char_s:char_e, :].max(dim=0, keepdim=True)[0]\n # for sent_idx, char_s, char_e in doc_span_info.mention_drange_list\n # ])\n mention_emb_list = []\n for sent_idx, char_s, char_e in doc_span_info.mention_drange_list:\n mention_token_emb = doc_token_emb[sent_idx, char_s: char_e, :] # [num_mention_tokens, hidden_size]\n if self.config.seq_reduce_type == 'AWA':\n mention_emb = self.span_token_reducer(mention_token_emb) # [hidden_size]\n elif self.config.seq_reduce_type == 'MaxPooling':\n mention_emb = mention_token_emb.max(dim=0)[0]\n elif self.config.seq_reduce_type == 'MeanPooling':\n mention_emb = mention_token_emb.mean(dim=0)\n else:\n raise Exception('Unknown seq_reduce_type {}'.format(self.config.seq_reduce_type))\n mention_emb_list.append(mention_emb)\n doc_mention_emb = torch.stack(mention_emb_list, dim=0)\n\n # add sentence position embedding\n mention_sent_id_list = [drange[0] for drange in doc_span_info.mention_drange_list]\n doc_mention_emb = self.sent_pos_encoder(doc_mention_emb, sent_pos_ids=mention_sent_id_list)\n\n if self.config.use_token_role:\n # get mention type embedding\n doc_mention_emb = self.ment_type_encoder(doc_mention_emb, doc_span_info.mention_type_list)\n\n return doc_mention_emb\n\n def get_batch_sent_emb(self, ner_token_emb, ner_token_masks, valid_sent_num_list):\n # From [ner_batch_size, sent_len, hidden_size] to [ner_batch_size, hidden_size]\n if self.config.seq_reduce_type == 'AWA':\n total_sent_emb = self.doc_token_reducer(ner_token_emb, masks=ner_token_masks)\n elif self.config.seq_reduce_type == 'MaxPooling':\n total_sent_emb = ner_token_emb.max(dim=1)[0]\n elif self.config.seq_reduce_type == 'MeanPooling':\n total_sent_emb = ner_token_emb.mean(dim=1)\n else:\n raise Exception('Unknown seq_reduce_type {}'.format(self.config.seq_reduce_type))\n\n total_sent_pos_ids = []\n for valid_sent_num in 
valid_sent_num_list:\n total_sent_pos_ids += list(range(valid_sent_num))\n total_sent_emb = self.sent_pos_encoder(total_sent_emb, sent_pos_ids=total_sent_pos_ids)\n\n return total_sent_emb\n\n def get_doc_span_sent_context(self, doc_token_emb, doc_sent_emb, doc_fea, doc_span_info):\n '''\n\n :param doc_token_emb:[sent_num,sent_len,hidden_size]\n :param doc_sent_emb:[sent_num,hidden_size]\n :param doc_fea:\n :param doc_span_info:\n :return:\n '''\n '''\n 获取文章所有mention 嵌入,加入句子位置嵌入以及实体类型嵌入。长度和mention_drange_list一样\n doc_mention_emb:[num_mentions,hidden_size] 文章所有mentions\n '''\n doc_mention_emb = self.get_doc_span_mention_emb(doc_token_emb, doc_span_info)\n\n # only consider actual sentences\n if doc_sent_emb.size(0) > doc_fea.valid_sent_num:\n doc_sent_emb = doc_sent_emb[:doc_fea.valid_sent_num, :]\n\n span_context_list = []\n\n if doc_mention_emb is None:\n if self.config.use_doc_enc:\n doc_sent_context = self.doc_context_encoder(doc_sent_emb.unsqueeze(0), None).squeeze(0)\n else:\n doc_sent_context = doc_sent_emb\n else:\n num_mentions = doc_mention_emb.size(0)\n\n if self.config.use_doc_enc:\n # Size([1, num_mentions + num_valid_sents, hidden_size])\n total_ment_sent_emb = torch.cat([doc_mention_emb, doc_sent_emb], dim=0).unsqueeze(0)\n\n # size = [num_mentions+num_valid_sents, hidden_size]\n # here we do not need mask\n #使用transformer-2 编码实体和句子之间关系\n total_ment_sent_context = self.doc_context_encoder(total_ment_sent_emb, None).squeeze(0)\n\n # collect span context\n #span_mention_range_list 同一个mention 在mention_drange_list跨度范围,同一个mention在文章中可能出现多次,使用最大池化获取多个mention的上下文信息。\n for mid_s, mid_e in doc_span_info.span_mention_range_list:\n assert mid_e <= num_mentions\n multi_ment_context = total_ment_sent_context[mid_s:mid_e] # [num_mentions, hidden_size]\n\n # span_context.size [1, hidden_size]\n if self.config.seq_reduce_type == 'AWA':\n span_context = self.span_mention_reducer(multi_ment_context, keepdim=True)\n elif self.config.seq_reduce_type == 'MaxPooling':\n span_context = multi_ment_context.max(dim=0, keepdim=True)[0]\n elif self.config.seq_reduce_type == 'MeanPooling':\n span_context = multi_ment_context.mean(dim=0, keepdim=True)\n else:\n raise Exception('Unknown seq_reduce_type {}'.format(self.config.seq_reduce_type))\n #span_context [1,hidden_size]\n span_context_list.append(span_context)\n\n # collect sent context\n doc_sent_context = total_ment_sent_context[num_mentions:, :]\n else:\n # collect span context\n for mid_s, mid_e in doc_span_info.span_mention_range_list:\n assert mid_e <= num_mentions\n multi_ment_emb = doc_mention_emb[mid_s:mid_e] # [num_mentions, hidden_size]\n\n # span_context.size is [1, hidden_size]\n if self.config.seq_reduce_type == 'AWA':\n span_context = self.span_mention_reducer(multi_ment_emb, keepdim=True)\n elif self.config.seq_reduce_type == 'MaxPooling':\n span_context = multi_ment_emb.max(dim=0, keepdim=True)[0]\n elif self.config.seq_reduce_type == 'MeanPooling':\n span_context = multi_ment_emb.mean(dim=0, keepdim=True)\n else:\n raise Exception('Unknown seq_reduce_type {}'.format(self.config.seq_reduce_type))\n span_context_list.append(span_context)\n\n # collect sent context\n doc_sent_context = doc_sent_emb\n\n return span_context_list, doc_sent_context\n\n def get_event_cls_info(self, sent_context_emb, doc_fea, train_flag=True):\n doc_event_logps = []\n for event_idx, event_label in enumerate(doc_fea.event_type_labels):\n event_table = self.event_tables[event_idx]\n cur_event_logp = event_table(sent_context_emb=sent_context_emb) # [1, 2]\n 
doc_event_logps.append(cur_event_logp)\n doc_event_logps = torch.cat(doc_event_logps, dim=0) # [num_event_types, 2]\n\n if train_flag:\n device = doc_event_logps.device\n doc_event_labels = torch.tensor(\n doc_fea.event_type_labels, device=device, dtype=torch.long, requires_grad=False\n ) # [num_event_types]\n doc_event_cls_loss = F.nll_loss(doc_event_logps, doc_event_labels, reduction='sum')\n return doc_event_cls_loss\n else:\n doc_event_pred_list = doc_event_logps.argmax(dim=-1).tolist()\n return doc_event_pred_list\n\n def get_field_cls_info(self, event_idx, field_idx, batch_span_emb,\n batch_span_label=None, train_flag=True):\n '''\n\n :param event_idx:事件类型id\n :param field_idx:事件角色类型id\n :param batch_span_emb:[num_spans,hidden_size]\n :param batch_span_label:[num_spans]\n :param train_flag:\n :return:\n '''\n '''\n 计算当前cand_span与当前field logits\n batch_span_logp:[num_spans,2] 角色分类是一个二分类 0-代表不是该角色, 1-代表是该角色\n '''\n batch_span_logp = self.get_field_pred_logp(event_idx, field_idx, batch_span_emb)\n\n if train_flag:\n assert batch_span_label is not None\n device = batch_span_logp.device\n data_type = batch_span_logp.dtype\n # to prevent too many FPs\n class_weight = torch.tensor(\n [self.config.neg_field_loss_scaling, 1.0], device=device, dtype=data_type, requires_grad=False\n )\n '''\n field_cls_loss:[num_spans]\n '''\n field_cls_loss = F.nll_loss(batch_span_logp, batch_span_label, weight=class_weight, reduction='sum')\n return field_cls_loss, batch_span_logp\n else:\n span_pred_list = batch_span_logp.argmax(dim=-1).tolist()\n return span_pred_list, batch_span_logp\n\n def get_field_pred_logp(self, event_idx, field_idx, batch_span_emb, include_prob=False):\n event_table = self.event_tables[event_idx]\n batch_span_logp = event_table(batch_span_emb=batch_span_emb, field_idx=field_idx)\n\n if include_prob:\n # used for decision sampling, is not inside the computation graph\n batch_span_prob = batch_span_logp.detach().exp()\n return batch_span_logp, batch_span_prob\n else:\n return batch_span_logp\n\n def get_none_span_context(self, init_tensor):\n none_span_context = torch.zeros(\n 1, self.config.hidden_size,\n device=init_tensor.device, dtype=init_tensor.dtype, requires_grad=False\n )\n return none_span_context\n\n def conduct_field_level_reasoning(self, event_idx, field_idx, prev_decode_context, batch_span_context):\n event_table = self.event_tables[event_idx]\n field_query = event_table.field_queries[field_idx]\n num_spans = batch_span_context.size(0)\n # make the model to be aware of which field\n batch_cand_emb = batch_span_context + field_query\n if self.config.use_path_mem:\n # [1, num_spans + valid_sent_num, hidden_size]\n total_cand_emb = torch.cat([batch_cand_emb, prev_decode_context], dim=0).unsqueeze(0)\n # use transformer to do the reasoning transformer-3编码 候选span和memmory-tensor\n total_cand_emb = self.field_context_encoder(total_cand_emb, None).squeeze(0)\n batch_cand_emb = total_cand_emb[:num_spans, :]\n # TODO: what if reasoning over reasoning context\n return batch_cand_emb, prev_decode_context\n\n def get_field_mle_loss_list(self, doc_sent_context, batch_span_context,\n event_idx, field_idx2pre_path2cur_span_idx_set):\n '''\n :param doc_sent_context: [sent_num, hidden_size]\n :param batch_span_context: [num_spans, hidden_size]\n :param event_idx: [1]\n :param field_idx2pre_path2cur_span_idx_set: [field_nums,(pre_path:cur_span_idx_set)]\n :return: field_mle_loss_list: ?\n '''\n field_mle_loss_list = []\n num_fields = self.event_tables[event_idx].num_fields\n num_spans = 
batch_span_context.size(0)\n #初始化解码上下文向量采用句子向量 [sent_num, hidden_size]\n prev_path2prev_decode_context = {\n (): doc_sent_context\n }\n\n for field_idx in range(num_fields):\n prev_path2cur_span_idx_set = field_idx2pre_path2cur_span_idx_set[field_idx]\n for prev_path, cur_span_idx_set in prev_path2cur_span_idx_set.items():\n if prev_path not in prev_path2prev_decode_context:\n # note that when None and valid_span co-exists, ignore None paths during training\n continue\n # get decoding context\n prev_decode_context = prev_path2prev_decode_context[prev_path]\n\n '''\n batch_cand_emb size与batch_span_context一样 \n batch_cand_emb [num_spans, hidden_size]\n prev_decode_context:[num_spans+decode_spans, hidden_size]\n conduct reasoning on this field transformer-3编码span和 当前memory tensor,\n 获取transformer-3编码的batch_cand_emb,prev_decode_context保持不变\n '''\n batch_cand_emb, prev_decode_context = self.conduct_field_level_reasoning(\n event_idx, field_idx, prev_decode_context, batch_span_context\n )\n # prepare label for candidate spans\n #batch_span_label[num_spans] one-hot label\n batch_span_label = get_batch_span_label(\n num_spans, cur_span_idx_set, batch_span_context.device\n )\n '''\n calculate loss\n cur_field_cls_loss [1]\n batch_span_logp [num_spans,2]\n '''\n cur_field_cls_loss, batch_span_logp = self.get_field_cls_info(\n event_idx, field_idx, batch_cand_emb,\n batch_span_label=batch_span_label, train_flag=True\n )\n\n field_mle_loss_list.append(cur_field_cls_loss)\n\n # cur_span_idx_set needs to ensure at least one element, None\n for span_idx in cur_span_idx_set:\n # Teacher-forcing Style Training\n if span_idx is None:\n span_context = self.event_tables[event_idx].field_queries[field_idx]\n else:\n # TODO: add either batch_cand_emb or batch_span_context to the memory tensor\n span_context = batch_cand_emb[span_idx].unsqueeze(0)\n\n cur_path = prev_path + (span_idx, )\n if self.config.use_path_mem:\n #cur_decode_context[num_spans+decode_spans, hidden_size]\n cur_decode_context = torch.cat([prev_decode_context, span_context], dim=0)\n prev_path2prev_decode_context[cur_path] = cur_decode_context\n else:\n prev_path2prev_decode_context[cur_path] = prev_decode_context\n\n return field_mle_loss_list\n\n def get_loss_on_doc(self, doc_token_emb, doc_sent_emb, doc_fea, doc_span_info):\n '''\n\n :param doc_token_emb: [max_sent_num,sen_len,hidden_size]\n :param doc_sent_emb: [max_sent_num,hidden_size] 加入句子位置向量信\n :param doc_fea:字典比较多字段\n :param doc_span_info:字典,包含比较多字段\n :return:total_event_loss [1]\n '''\n '''\n 首先加入句子位置嵌入以及实体类型嵌入获取文章中所有mention向量\n 采用transformer-2编码实体和句子之间的关系,并且同一mention在文章不同位置采用最大池化获取特征。\n span_context_list:[num_spans,hidden_size] num_mentions和span_mention_range_list长度一致,指的是唯一标识的mention。\n doc_sent_context: [sent_num,hidden_size]\n '''\n span_context_list, doc_sent_context = self.get_doc_span_sent_context(\n doc_token_emb, doc_sent_emb, doc_fea, doc_span_info,\n )\n #span_context_list [num_mentions, hidden_size]\n # doc_sent_context [sent_num, hidden_size]\n if len(span_context_list) == 0:\n raise Exception('Error: doc_fea.ex_idx {} does not have valid span'.format(doc_fea.ex_idx))\n #batch_span_context [num_mentions, hidden_size]\n batch_span_context = torch.cat(span_context_list, dim=0)\n num_spans = len(span_context_list)\n #event_idx2field_idx2pre_path2cur_span_idx_set [envent_nums, field_nums,(pre_path:cur_span_idx_set)]\n event_idx2field_idx2pre_path2cur_span_idx_set = doc_span_info.event_dag_info\n\n # 1. 
get event type classification loss 多标签分类\n #[1]函数里面求sum doc_sent_context: [sent_num,hidden_size]\n event_cls_loss = self.get_event_cls_info(doc_sent_context, doc_fea, train_flag=True)\n\n # 2. for each event type, get field classification loss\n # Note that including the memory tensor into the computing graph can boost the performance (>1 F1)\n all_field_loss_list = []\n for event_idx, event_label in enumerate(doc_fea.event_type_labels):\n if event_label == 0:\n # treat all spans as invalid arguments for that event,\n # because we need to use all parameters to support distributed training\n #memory tensor预先使用句子向量信息 [sent_num,hidden_size]\n prev_decode_context = doc_sent_context\n num_fields = self.event_tables[event_idx].num_fields\n for field_idx in range(num_fields):\n # conduct reasoning on this field transformer-3编码span和 memory tensor\n batch_cand_emb, prev_decode_context = self.conduct_field_level_reasoning(\n event_idx, field_idx, prev_decode_context, batch_span_context\n )\n #batch_cand_emb size与batch_span_context一样\n # prepare label for candidate spans\n # batch_cand_emb [num_spans, hidden_size]\n # batch_span_label [num_spans] one-hot label\n batch_span_label = get_batch_span_label(\n num_spans, set(), batch_span_context.device\n )\n # calculate the field loss\n #cur_field_cls_loss [1]\n #batch_span_logp [num_spans,2]\n cur_field_cls_loss, batch_span_logp = self.get_field_cls_info(\n event_idx, field_idx, batch_cand_emb,\n batch_span_label=batch_span_label, train_flag=True\n )\n # update the memory tensor\n # span_context:[1,hidden_size]\n span_context = self.event_tables[event_idx].field_queries[field_idx]\n if self.config.use_path_mem:\n prev_decode_context = torch.cat([prev_decode_context, span_context], dim=0)\n\n all_field_loss_list.append(cur_field_cls_loss)\n else:\n #field_idx2pre_path2cur_span_idx_set [field_nums,(pre_path:cur_span_idx_set)]\n #doc_sent_context [sent_num, hidden_size]\n #batch_span_context[num_spans, hidden_size]\n field_idx2pre_path2cur_span_idx_set = event_idx2field_idx2pre_path2cur_span_idx_set[event_idx]\n # 如果包含该事件,则获取事件角色所有loss\n field_loss_list = self.get_field_mle_loss_list(\n doc_sent_context, batch_span_context, event_idx, field_idx2pre_path2cur_span_idx_set,\n )\n all_field_loss_list += field_loss_list\n\n total_event_loss = event_cls_loss + sum(all_field_loss_list)\n return total_event_loss\n\n def get_mix_loss(self, doc_sent_loss_list, doc_event_loss_list, doc_span_info_list):\n batch_size = len(doc_span_info_list)\n loss_batch_avg = 1.0 / batch_size\n lambda_1 = self.config.loss_lambda\n lambda_2 = 1 - lambda_1\n\n doc_ner_loss_list = []\n for doc_sent_loss, doc_span_info in zip(doc_sent_loss_list, doc_span_info_list):\n # doc_sent_loss: Size([num_valid_sents])\n doc_ner_loss_list.append(doc_sent_loss.sum())\n\n return loss_batch_avg * (lambda_1 * sum(doc_ner_loss_list) + lambda_2 * sum(doc_event_loss_list))\n\n def get_eval_on_doc(self, doc_token_emb, doc_sent_emb, doc_fea, doc_span_info):\n span_context_list, doc_sent_context = self.get_doc_span_sent_context(\n doc_token_emb, doc_sent_emb, doc_fea, doc_span_info\n )\n if len(span_context_list) == 0:\n event_pred_list = []\n event_idx2obj_idx2field_idx2token_tup = []\n event_idx2event_decode_paths = []\n for event_idx in range(len(self.event_type_fields_pairs)):\n event_pred_list.append(0)\n event_idx2obj_idx2field_idx2token_tup.append(None)\n event_idx2event_decode_paths.append(None)\n\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, 
event_idx2event_decode_paths\n\n batch_span_context = torch.cat(span_context_list, dim=0)\n\n # 1. get event type prediction\n event_pred_list = self.get_event_cls_info(doc_sent_context, doc_fea, train_flag=False)\n\n # 2. for each event type, get field prediction\n # the following mappings are all implemented using list index\n event_idx2event_decode_paths = []\n event_idx2obj_idx2field_idx2token_tup = []\n for event_idx, event_pred in enumerate(event_pred_list):\n if event_pred == 0:\n event_idx2event_decode_paths.append(None)\n event_idx2obj_idx2field_idx2token_tup.append(None)\n continue\n\n num_fields = self.event_tables[event_idx].num_fields\n\n prev_path2prev_decode_context = {(): doc_sent_context}\n last_field_paths = [()] # only record paths of the last field\n for field_idx in range(num_fields):\n cur_paths = []\n for prev_path in last_field_paths: # traverse all previous decoding paths\n # get decoding context\n prev_decode_context = prev_path2prev_decode_context[prev_path]\n # conduct reasoning on this field\n batch_cand_emb, prev_decode_context = self.conduct_field_level_reasoning(\n event_idx, field_idx, prev_decode_context, batch_span_context\n )\n\n # get field prediction for all spans\n span_pred_list, _ = self.get_field_cls_info(\n event_idx, field_idx, batch_cand_emb, train_flag=False\n )\n\n # prepare span_idx to be used for the next field\n cur_span_idx_list = []\n for span_idx, span_pred in enumerate(span_pred_list):\n if span_pred == 1:\n cur_span_idx_list.append(span_idx)\n if len(cur_span_idx_list) == 0:\n # all span is invalid for this field, just choose 'Unknown' token\n cur_span_idx_list.append(None)\n\n for span_idx in cur_span_idx_list:\n if span_idx is None:\n span_context = self.event_tables[event_idx].field_queries[field_idx]\n # span_context = none_span_context\n else:\n span_context = batch_cand_emb[span_idx].unsqueeze(0)\n\n cur_path = prev_path + (span_idx, )\n cur_decode_context = torch.cat([prev_decode_context, span_context], dim=0)\n cur_paths.append(cur_path)\n prev_path2prev_decode_context[cur_path] = cur_decode_context\n\n # update decoding paths\n last_field_paths = cur_paths\n\n obj_idx2field_idx2token_tup = []\n for decode_path in last_field_paths:\n assert len(decode_path) == num_fields\n field_idx2token_tup = []\n for span_idx in decode_path:\n if span_idx is None:\n token_tup = None\n else:\n token_tup = doc_span_info.span_token_tup_list[span_idx]\n\n field_idx2token_tup.append(token_tup)\n obj_idx2field_idx2token_tup.append(field_idx2token_tup)\n\n event_idx2event_decode_paths.append(last_field_paths)\n event_idx2obj_idx2field_idx2token_tup.append(obj_idx2field_idx2token_tup)\n\n # the first three terms are for metric calculation, the last two are for case studies\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, event_idx2event_decode_paths\n\n def adjust_token_label(self, doc_token_labels_list):\n if self.config.use_token_role: # do not use detailed token\n return doc_token_labels_list\n else:\n adj_doc_token_labels_list = []\n for doc_token_labels in doc_token_labels_list:\n entity_begin_mask = doc_token_labels % 2 == 1\n entity_inside_mask = (doc_token_labels != 0) & (doc_token_labels % 2 == 0)\n adj_doc_token_labels = doc_token_labels.masked_fill(entity_begin_mask, 1)\n adj_doc_token_labels = adj_doc_token_labels.masked_fill(entity_inside_mask, 2)\n\n adj_doc_token_labels_list.append(adj_doc_token_labels)\n return adj_doc_token_labels_list\n\n def get_local_context_info(self, 
doc_batch_dict, train_flag=False, use_gold_span=False):\n label_key = 'doc_token_labels'\n if train_flag or use_gold_span:\n assert label_key in doc_batch_dict\n need_label_flag = True\n else:\n need_label_flag = False\n\n if need_label_flag:\n doc_token_labels_list = self.adjust_token_label(doc_batch_dict[label_key])\n else:\n doc_token_labels_list = None\n\n batch_size = len(doc_batch_dict['ex_idx'])\n doc_token_ids_list = doc_batch_dict['doc_token_ids']\n doc_token_masks_list = doc_batch_dict['doc_token_masks']\n valid_sent_num_list = doc_batch_dict['valid_sent_num']\n\n # transform doc_batch into sent_batch\n ner_batch_idx_start_list = [0]\n ner_token_ids = []\n ner_token_masks = []\n ner_token_labels = [] if need_label_flag else None\n for batch_idx, valid_sent_num in enumerate(valid_sent_num_list):\n idx_start = ner_batch_idx_start_list[-1]\n idx_end = idx_start + valid_sent_num\n ner_batch_idx_start_list.append(idx_end)\n\n ner_token_ids.append(doc_token_ids_list[batch_idx])\n ner_token_masks.append(doc_token_masks_list[batch_idx])\n if need_label_flag:\n ner_token_labels.append(doc_token_labels_list[batch_idx])\n\n # [ner_batch_size, sent_len]\n ner_token_ids = torch.cat(ner_token_ids, dim=0)\n # [ner_batch_size, sent_len]\n ner_token_masks = torch.cat(ner_token_masks, dim=0)\n if need_label_flag:\n ner_token_labels = torch.cat(ner_token_labels, dim=0)\n\n # get ner output\n ner_token_emb, ner_loss, ner_token_preds = self.ner_model(\n ner_token_ids, ner_token_masks, label_ids=ner_token_labels,\n train_flag=train_flag, decode_flag=not use_gold_span,\n )\n # ner_token_emb [batch_size*sent_num, sen_len, hidden_size]\n # ner_token_preds [batch_size*sent_num, sen_len]\n\n if use_gold_span: # definitely use gold span info\n ner_token_types = ner_token_labels\n else:\n ner_token_types = ner_token_preds\n\n # get sentence embedding [batch_size*sent_num,hidden_size],采用最大池化获取句子向量\n ner_sent_emb = self.get_batch_sent_emb(ner_token_emb, ner_token_masks, valid_sent_num_list)\n\n assert sum(valid_sent_num_list) == ner_token_emb.size(0) == ner_sent_emb.size(0)\n\n # followings are all lists of tensors\n doc_token_emb_list = []\n doc_token_masks_list = []\n doc_token_types_list = []\n doc_sent_emb_list = []\n doc_sent_loss_list = []\n for batch_idx in range(batch_size):\n idx_start = ner_batch_idx_start_list[batch_idx]\n idx_end = ner_batch_idx_start_list[batch_idx+1]\n doc_token_emb_list.append(ner_token_emb[idx_start:idx_end, :, :])\n doc_token_masks_list.append(ner_token_masks[idx_start:idx_end, :])\n doc_token_types_list.append(ner_token_types[idx_start:idx_end, :])\n doc_sent_emb_list.append(ner_sent_emb[idx_start:idx_end, :])\n if ner_loss is not None:\n # every doc_sent_loss.size is [valid_sent_num]\n doc_sent_loss_list.append(ner_loss[idx_start:idx_end])\n\n return doc_token_emb_list, doc_token_masks_list, doc_token_types_list, doc_sent_emb_list, doc_sent_loss_list\n\n def forward(self, doc_batch_dict, doc_features,\n train_flag=True, use_gold_span=False, teacher_prob=1,\n event_idx2entity_idx2field_idx=None, heuristic_type=None):\n # Using scheduled sampling to gradually transit to predicted entity spans\n if train_flag and self.config.use_scheduled_sampling:\n # teacher_prob will gradually decrease outside\n if random.random() < teacher_prob:\n use_gold_span = True\n else:\n use_gold_span = False\n\n '''\n get doc token-level local context\n 采用ner编码获取句子内实体识别loss,以及获取句子向量加入句子位置特征\n '''\n doc_token_emb_list, doc_token_masks_list, doc_token_types_list, doc_sent_emb_list, doc_sent_loss_list 
= \\\n self.get_local_context_info(\n doc_batch_dict, train_flag=train_flag, use_gold_span=use_gold_span,\n )\n '''\n doc_sent_emb_list:transformer-1编码之后的embedding\n doc_token_emb_list [batch_size,max_sent_num,sen_len,hidden_size]\n doc_token_masks_list [batch_size,max_sent_num,sen_len]\n doc_token_types_list [batch_size,max_sent_num,sen_len] 训练随机选择真实标签还是预测标签,推理使用预测标签\n doc_sent_emb_list [batch_size,max_sent_num,hidden_size]\n doc_sent_loss_list [batch_size,max_sent_num] 每个句子的ner_loss\n '''\n # get doc feature objects\n ex_idx_list = doc_batch_dict['ex_idx']\n doc_fea_list = [doc_features[ex_idx] for ex_idx in ex_idx_list]\n\n # get doc span-level info for event extraction\n doc_span_info_list = get_doc_span_info_list(doc_token_types_list, doc_fea_list, use_gold_span=use_gold_span)\n '''\n doc_span_info_list:包含下列字段值 [batch_size,(dict)]\n event_dag_info: 有向无环图事件信息表 span_id 为span_token_tup_list的id, span_id路径信息 event_idx -> field_idx -> pre_path -> cur_span_idx_set\n mention_drange_list: 文章所有mention的 (sent_idx span范围)\n mention_type_list: 文章所有mention的类别, 长度和 mention_drange_list一样\n span_dranges_list: 同一mention 所对应(sent_id span范围),长度和span_token_tup_list一样\n span_mention_range_list: 同一个mention 在mention_drange_list跨度范围,同一个mention在文章中可能出现多次,而且可能有不同的表达式\n span_token_tup_list: mention 所对应的token_id, 长度同span_mention_range_list长度一样\n '''\n if train_flag:\n doc_event_loss_list = [] #[batch_size]\n for batch_idx, ex_idx in enumerate(ex_idx_list):\n '''\n 获取篇章级事件抽取事件类型损失,以及事件所有角色损失\n '''\n doc_event_loss_list.append(\n self.get_loss_on_doc(\n doc_token_emb_list[batch_idx],\n doc_sent_emb_list[batch_idx],\n doc_fea_list[batch_idx],\n doc_span_info_list[batch_idx],\n )\n )\n\n mix_loss = self.get_mix_loss(doc_sent_loss_list, doc_event_loss_list, doc_span_info_list)\n\n return mix_loss\n else:\n # return a list object may not be supported by torch.nn.parallel.DataParallel\n # ensure to run it under the single-gpu mode\n eval_results = []\n\n if heuristic_type is None:\n for batch_idx, ex_idx in enumerate(ex_idx_list):\n eval_results.append(\n self.get_eval_on_doc(\n doc_token_emb_list[batch_idx],\n doc_sent_emb_list[batch_idx],\n doc_fea_list[batch_idx],\n doc_span_info_list[batch_idx],\n )\n )\n else:\n assert event_idx2entity_idx2field_idx is not None\n for batch_idx, ex_idx in enumerate(ex_idx_list):\n eval_results.append(\n self.heuristic_decode_on_doc(\n doc_token_emb_list[batch_idx],\n doc_sent_emb_list[batch_idx],\n doc_fea_list[batch_idx],\n doc_span_info_list[batch_idx],\n event_idx2entity_idx2field_idx,\n heuristic_type=heuristic_type,\n )\n )\n\n return eval_results\n\n def heuristic_decode_on_doc(self, doc_token_emb, doc_sent_emb, doc_fea, doc_span_info,\n event_idx2entity_idx2field_idx, heuristic_type='GreedyDec'):\n support_heuristic_types = ['GreedyDec', 'ProductDec']\n if heuristic_type not in support_heuristic_types:\n raise Exception('Unsupported heuristic type {}, pleasure choose from {}'.format(\n heuristic_type, str(support_heuristic_types)\n ))\n\n span_context_list, doc_sent_context = self.get_doc_span_sent_context(\n doc_token_emb, doc_sent_emb, doc_fea, doc_span_info\n )\n\n span_token_tup_list = doc_span_info.span_token_tup_list\n span_mention_range_list = doc_span_info.span_mention_range_list\n mention_drange_list = doc_span_info.mention_drange_list\n mention_type_list = doc_span_info.mention_type_list\n # heuristic decoding strategies will work on these span candidates\n event_idx2field_idx2span_token_tup2dranges = self.get_event_field_span_candidates(\n span_token_tup_list, 
span_mention_range_list, mention_drange_list,\n mention_type_list, event_idx2entity_idx2field_idx,\n )\n\n # if there is no extracted span, just directly return\n if len(span_token_tup_list) == 0:\n event_pred_list = []\n event_idx2obj_idx2field_idx2token_tup = [] # this term will be compared with ground-truth table contents\n for event_idx in range(len(self.event_type_fields_pairs)):\n event_pred_list.append(0)\n event_idx2obj_idx2field_idx2token_tup.append(None)\n\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, event_idx2field_idx2span_token_tup2dranges\n\n # 1. get event type prediction as model-based approach\n event_pred_list = self.get_event_cls_info(doc_sent_context, doc_fea, train_flag=False)\n\n # 2. for each event type, get field prediction\n # From now on, use heuristic inference to get the token for the field\n # the following mappings are all implemented using list index\n event_idx2obj_idx2field_idx2token_tup = []\n for event_idx, event_pred in enumerate(event_pred_list):\n if event_pred == 0:\n event_idx2obj_idx2field_idx2token_tup.append(None)\n continue\n\n num_fields = self.event_tables[event_idx].num_fields\n field_idx2span_token_tup2dranges = event_idx2field_idx2span_token_tup2dranges[event_idx]\n\n obj_idx2field_idx2token_tup = [[]] # at least one decode path will be appended\n for field_idx in range(num_fields):\n if heuristic_type == support_heuristic_types[0]:\n obj_idx2field_idx2token_tup = append_top_span_only(\n obj_idx2field_idx2token_tup, field_idx, field_idx2span_token_tup2dranges\n )\n elif heuristic_type == support_heuristic_types[1]:\n obj_idx2field_idx2token_tup = append_all_spans(\n obj_idx2field_idx2token_tup, field_idx, field_idx2span_token_tup2dranges\n )\n else:\n raise Exception('Unsupported heuristic type {}, pleasure choose from {}'.format(\n heuristic_type, str(support_heuristic_types)\n ))\n\n event_idx2obj_idx2field_idx2token_tup.append(obj_idx2field_idx2token_tup)\n\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, event_idx2field_idx2span_token_tup2dranges\n\n def get_event_field_span_candidates(self, span_token_tup_list, span_mention_range_list,\n mention_drange_list, mention_type_list, event_idx2entity_idx2field_idx):\n # get mention idx -> span idx\n mention_span_idx_list = []\n for span_idx, (ment_idx_s, ment_idx_e) in enumerate(span_mention_range_list):\n mention_span_idx_list.extend([span_idx] * (ment_idx_e - ment_idx_s))\n assert len(mention_span_idx_list) == len(mention_drange_list)\n\n event_idx2field_idx2span_token_tup2dranges = {}\n for event_idx, (event_type, field_types) in enumerate(self.event_type_fields_pairs):\n # get the predefined entity idx to field idx mapping\n gold_entity_idx2field_idx = event_idx2entity_idx2field_idx[event_idx]\n\n # store field candidates for this doc\n field_idx2span_token_tup2dranges = {}\n for field_idx, _ in enumerate(field_types):\n field_idx2span_token_tup2dranges[field_idx] = {}\n\n # aggregate field candidates according to mention types\n for ment_idx, (ment_drange, ment_entity_idx) in enumerate(zip(mention_drange_list, mention_type_list)):\n if ment_entity_idx not in gold_entity_idx2field_idx:\n continue\n ment_field_idx = gold_entity_idx2field_idx[ment_entity_idx]\n if ment_field_idx is None:\n continue\n\n ment_span_idx = mention_span_idx_list[ment_idx]\n span_token_tup = span_token_tup_list[ment_span_idx]\n\n # because it is dict, so all modifications to the key will take effect in raw 
dict\n cur_span_token_tup2dranges = field_idx2span_token_tup2dranges[ment_field_idx]\n if span_token_tup not in cur_span_token_tup2dranges:\n cur_span_token_tup2dranges[span_token_tup] = []\n cur_span_token_tup2dranges[span_token_tup].append(ment_drange)\n\n event_idx2field_idx2span_token_tup2dranges[event_idx] = field_idx2span_token_tup2dranges\n\n return event_idx2field_idx2span_token_tup2dranges\n\n\ndef append_top_span_only(last_token_path_list, field_idx, field_idx2span_token_tup2dranges):\n new_token_path_list = []\n span_token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]\n token_min_drange_list = [\n (token_tup, dranges[0]) for token_tup, dranges in span_token_tup2dranges.items()\n ]\n token_min_drange_list.sort(key=lambda x: x[1])\n\n for last_token_path in last_token_path_list:\n new_token_path = list(last_token_path)\n if len(token_min_drange_list) == 0:\n new_token_path.append(None)\n else:\n token_tup = token_min_drange_list[0][0]\n new_token_path.append(token_tup)\n\n new_token_path_list.append(new_token_path)\n\n return new_token_path_list\n\n\ndef append_all_spans(last_token_path_list, field_idx, field_idx2span_token_tup2dranges):\n new_token_path_list = []\n span_token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]\n\n for last_token_path in last_token_path_list:\n for token_tup in span_token_tup2dranges.keys():\n new_token_path = list(last_token_path)\n new_token_path.append(token_tup)\n new_token_path_list.append(new_token_path)\n\n if len(span_token_tup2dranges) == 0: # ensure every last path will be extended\n new_token_path = list(last_token_path)\n new_token_path.append(None)\n new_token_path_list.append(new_token_path)\n\n return new_token_path_list\n\n\nclass AttentiveReducer(nn.Module):\n def __init__(self, hidden_size, dropout=0.1):\n super(AttentiveReducer, self).__init__()\n\n self.hidden_size = hidden_size\n self.att_norm = math.sqrt(self.hidden_size)\n\n self.fc = nn.Linear(hidden_size, 1, bias=False)\n self.att = None\n\n self.layer_norm = transformer.LayerNorm(hidden_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, batch_token_emb, masks=None, keepdim=False):\n # batch_token_emb: Size([*, seq_len, hidden_size])\n # masks: Size([*, seq_len]), 1: normal, 0: pad\n\n query = self.fc.weight\n if masks is None:\n att_mask = None\n else:\n att_mask = masks.unsqueeze(-2) # [*, 1, seq_len]\n\n # batch_att_emb: Size([*, 1, hidden_size])\n # self.att: Size([*, 1, seq_len])\n batch_att_emb, self.att = transformer.attention(\n query, batch_token_emb, batch_token_emb, mask=att_mask\n )\n\n batch_att_emb = self.dropout(self.layer_norm(batch_att_emb))\n\n if keepdim:\n return batch_att_emb\n else:\n return batch_att_emb.squeeze(-2)\n\n def extra_repr(self):\n return 'hidden_size={}, att_norm={}'.format(self.hidden_size, self.att_norm)\n\n\nclass SentencePosEncoder(nn.Module):\n def __init__(self, hidden_size, max_sent_num=100, dropout=0.1):\n super(SentencePosEncoder, self).__init__()\n\n self.embedding = nn.Embedding(max_sent_num, hidden_size)\n self.layer_norm = transformer.LayerNorm(hidden_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, batch_elem_emb, sent_pos_ids=None):\n if sent_pos_ids is None:\n num_elem = batch_elem_emb.size(-2)\n sent_pos_ids = torch.arange(\n num_elem, dtype=torch.long, device=batch_elem_emb.device, requires_grad=False\n )\n elif not isinstance(sent_pos_ids, torch.Tensor):\n sent_pos_ids = torch.tensor(\n sent_pos_ids, dtype=torch.long, device=batch_elem_emb.device, requires_grad=False\n 
)\n\n batch_pos_emb = self.embedding(sent_pos_ids)\n out = batch_elem_emb + batch_pos_emb\n out = self.dropout(self.layer_norm(out))\n\n return out\n\n\nclass MentionTypeEncoder(nn.Module):\n def __init__(self, hidden_size, num_ment_types, dropout=0.1):\n super(MentionTypeEncoder, self).__init__()\n\n self.embedding = nn.Embedding(num_ment_types, hidden_size)\n self.layer_norm = transformer.LayerNorm(hidden_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, batch_mention_emb, mention_type_ids):\n if not isinstance(mention_type_ids, torch.Tensor):\n mention_type_ids = torch.tensor(\n mention_type_ids, dtype=torch.long, device=batch_mention_emb.device, requires_grad=False\n )\n\n batch_mention_type_emb = self.embedding(mention_type_ids)\n out = batch_mention_emb + batch_mention_type_emb\n out = self.dropout(self.layer_norm(out))\n\n return out\n\n\nclass EventTable(nn.Module):\n def __init__(self, event_type, field_types, hidden_size):\n super(EventTable, self).__init__()\n\n self.event_type = event_type\n self.field_types = field_types\n self.num_fields = len(field_types)\n self.hidden_size = hidden_size\n\n self.event_cls = nn.Linear(hidden_size, 2) # 0: NA, 1: trigger this event\n self.field_cls_list = nn.ModuleList(\n # 0: NA, 1: trigger this field\n [nn.Linear(hidden_size, 2) for _ in range(self.num_fields)]\n )\n\n # used to aggregate sentence and span embedding\n self.event_query = nn.Parameter(torch.Tensor(1, self.hidden_size))\n # used for fields that do not contain any valid span\n # self.none_span_emb = nn.Parameter(torch.Tensor(1, self.hidden_size))\n # used for aggregating history filled span info\n self.field_queries = nn.ParameterList(\n [nn.Parameter(torch.Tensor(1, self.hidden_size)) for _ in range(self.num_fields)]\n )\n\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.hidden_size)\n self.event_query.data.uniform_(-stdv, stdv)\n # self.none_span_emb.data.uniform_(-stdv, stdv)\n for fq in self.field_queries:\n fq.data.uniform_(-stdv, stdv)\n\n def forward(self, sent_context_emb=None, batch_span_emb=None, field_idx=None):\n assert (sent_context_emb is None) ^ (batch_span_emb is None)\n\n if sent_context_emb is not None: # [num_spans+num_sents, hidden_size]\n # doc_emb.size = [1, hidden_size]\n doc_emb, _ = transformer.attention(self.event_query, sent_context_emb, sent_context_emb)\n doc_pred_logits = self.event_cls(doc_emb)\n doc_pred_logp = F.log_softmax(doc_pred_logits, dim=-1)\n\n return doc_pred_logp\n\n if batch_span_emb is not None:\n assert field_idx is not None\n # span_context_emb: [batch_size, hidden_size] or [hidden_size]\n if batch_span_emb.dim() == 1:\n batch_span_emb = batch_span_emb.unsqueeze(0)\n span_pred_logits = self.field_cls_list[field_idx](batch_span_emb)\n span_pred_logp = F.log_softmax(span_pred_logits, dim=-1)\n\n return span_pred_logp\n\n def extra_repr(self):\n return 'event_type={}, num_fields={}, hidden_size={}'.format(\n self.event_type, self.num_fields, self.hidden_size\n )\n\n\nclass MLP(nn.Module):\n \"\"\"Implements Multi-layer Perception.\"\"\"\n\n def __init__(self, input_size, output_size, mid_size=None, num_mid_layer=1, dropout=0.1):\n super(MLP, self).__init__()\n\n assert num_mid_layer >= 1\n if mid_size is None:\n mid_size = input_size\n\n self.input_fc = nn.Linear(input_size, mid_size)\n self.out_fc = nn.Linear(mid_size, output_size)\n if num_mid_layer > 1:\n self.mid_fcs = nn.ModuleList(\n nn.Linear(mid_size, mid_size) for _ in range(num_mid_layer-1)\n )\n else:\n self.mid_fcs = []\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n x = self.dropout(F.relu(self.input_fc(x)))\n for mid_fc in self.mid_fcs:\n x = self.dropout(F.relu(mid_fc(x)))\n x = self.out_fc(x)\n return x\n\n\ndef get_span_mention_info(span_dranges_list, doc_token_type_list):\n span_mention_range_list = []\n mention_drange_list = []\n mention_type_list = []\n for span_dranges in span_dranges_list:\n ment_idx_s = len(mention_drange_list)\n for drange in span_dranges:\n mention_drange_list.append(drange)\n sent_idx, char_s, char_e = drange\n mention_type_list.append(doc_token_type_list[sent_idx][char_s])\n ment_idx_e = len(mention_drange_list)\n span_mention_range_list.append((ment_idx_s, ment_idx_e))\n\n return span_mention_range_list, mention_drange_list, mention_type_list\n\n\ndef extract_doc_valid_span_info(doc_token_type_mat, doc_fea):\n doc_token_id_mat = doc_fea.doc_token_ids.tolist()\n doc_token_mask_mat = doc_fea.doc_token_masks.tolist()\n\n # [(token_id_tuple, (sent_idx, char_s, char_e)), ...]\n span_token_drange_list = []\n\n valid_sent_num = doc_fea.valid_sent_num\n for sent_idx in range(valid_sent_num):\n seq_token_id_list = doc_token_id_mat[sent_idx]\n seq_token_mask_list = doc_token_mask_mat[sent_idx]\n seq_token_type_list = doc_token_type_mat[sent_idx]\n seq_len = len(seq_token_id_list)\n\n char_s = 0\n while char_s < seq_len:\n if seq_token_mask_list[char_s] == 0:\n break\n\n entity_idx = seq_token_type_list[char_s]\n\n if entity_idx % 2 == 1:\n char_e = char_s + 1\n while char_e < seq_len and seq_token_mask_list[char_e] == 1 and \\\n seq_token_type_list[char_e] == entity_idx + 1:\n char_e += 1\n\n token_tup = tuple(seq_token_id_list[char_s:char_e])\n drange = (sent_idx, char_s, char_e)\n\n span_token_drange_list.append((token_tup, drange))\n\n char_s = char_e\n else:\n char_s += 1\n\n 
span_token_drange_list.sort(key=lambda x: x[-1]) # sorted by drange = (sent_idx, char_s, char_e)\n # drange is exclusive and sorted\n token_tup2dranges = OrderedDict()\n for token_tup, drange in span_token_drange_list:\n if token_tup not in token_tup2dranges:\n token_tup2dranges[token_tup] = []\n token_tup2dranges[token_tup].append(drange)\n\n span_token_tup_list = list(token_tup2dranges.keys())\n span_dranges_list = list(token_tup2dranges.values())\n\n return span_token_tup_list, span_dranges_list\n\n\ndef get_batch_span_label(num_spans, cur_span_idx_set, device):\n # prepare span labels for this field and this path\n span_field_labels = [\n 1 if span_idx in cur_span_idx_set else 0 for span_idx in range(num_spans)\n ]\n\n batch_field_label = torch.tensor(\n span_field_labels, dtype=torch.long, device=device, requires_grad=False\n ) # [num_spans], val \\in {0, 1}\n\n return batch_field_label\n\n\nclass DCFEEModel(nn.Module):\n \"\"\"\n This module implements the baseline model described in http://www.aclweb.org/anthology/P18-4009:\n \"DCFEE: A Document-level Chinese Financial Event Extraction System\n based on Automatically Labeled Training Data\"\n \"\"\"\n\n def __init__(self, config, event_type_fields_pairs, ner_model=None):\n super(DCFEEModel, self).__init__()\n # Note that for distributed training, you must ensure that\n # for any batch, all parameters need to be used\n\n self.config = config\n self.event_type_fields_pairs = event_type_fields_pairs\n\n if ner_model is None:\n self.ner_model = NERModel(config)\n else:\n self.ner_model = ner_model\n\n # attentively reduce token embedding into sentence embedding\n self.doc_token_reducer = AttentiveReducer(config.hidden_size, dropout=config.dropout)\n # map sentence embedding to event prediction logits\n self.event_cls_layers = nn.ModuleList([\n nn.Linear(config.hidden_size, 2) for _ in self.event_type_fields_pairs\n ])\n\n def get_batch_sent_emb(self, ner_token_emb, ner_token_masks, valid_sent_num_list):\n # From [ner_batch_size, sent_len, hidden_size] to [ner_batch_size, hidden_size]\n total_sent_emb = self.doc_token_reducer(ner_token_emb, ner_token_masks)\n total_sent_pos_ids = []\n for valid_sent_num in valid_sent_num_list:\n total_sent_pos_ids += list(range(valid_sent_num))\n\n return total_sent_emb\n\n def get_loss_on_doc(self, doc_sent_emb, doc_fea):\n doc_sent_label_mat = torch.tensor(\n doc_fea.doc_sent_labels, dtype=torch.long, device=doc_sent_emb.device, requires_grad=False\n )\n event_cls_loss_list = []\n for event_idx, event_cls in enumerate(self.event_cls_layers):\n doc_sent_logits = event_cls(doc_sent_emb) # [sent_num, 2]\n doc_sent_labels = doc_sent_label_mat[:, event_idx] # [sent_num]\n event_cls_loss = F.cross_entropy(doc_sent_logits, doc_sent_labels, reduction='sum')\n event_cls_loss_list.append(event_cls_loss)\n\n final_loss = sum(event_cls_loss_list)\n return final_loss\n\n def get_mix_loss(self, doc_sent_loss_list, doc_event_loss_list, doc_span_info_list):\n batch_size = len(doc_span_info_list)\n loss_batch_avg = 1.0 / batch_size\n lambda_1 = self.config.loss_lambda\n lambda_2 = 1 - lambda_1\n\n doc_ner_loss_list = []\n for doc_sent_loss, doc_span_info in zip(doc_sent_loss_list, doc_span_info_list):\n # doc_sent_loss: Size([num_valid_sents])\n sent_loss_scaling = doc_sent_loss.new_full(\n doc_sent_loss.size(), 1, requires_grad=False\n )\n sent_loss_scaling[doc_span_info.missed_sent_idx_list] = self.config.loss_gamma\n doc_ner_loss = (doc_sent_loss * sent_loss_scaling).sum()\n 
doc_ner_loss_list.append(doc_ner_loss)\n\n return loss_batch_avg * (lambda_1 * sum(doc_ner_loss_list) + lambda_2 * sum(doc_event_loss_list))\n\n def get_local_context_info(self, doc_batch_dict, train_flag=False, use_gold_span=False):\n label_key = 'doc_token_labels'\n if train_flag or use_gold_span:\n assert label_key in doc_batch_dict\n need_label_flag = True\n else:\n need_label_flag = False\n\n if need_label_flag:\n doc_token_labels_list = doc_batch_dict[label_key]\n else:\n doc_token_labels_list = None\n\n batch_size = len(doc_batch_dict['ex_idx'])\n doc_token_ids_list = doc_batch_dict['doc_token_ids']\n doc_token_masks_list = doc_batch_dict['doc_token_masks']\n valid_sent_num_list = doc_batch_dict['valid_sent_num']\n\n # transform doc_batch into sent_batch\n ner_batch_idx_start_list = [0]\n ner_token_ids = []\n ner_token_masks = []\n ner_token_labels = [] if need_label_flag else None\n for batch_idx, valid_sent_num in enumerate(valid_sent_num_list):\n idx_start = ner_batch_idx_start_list[-1]\n idx_end = idx_start + valid_sent_num\n ner_batch_idx_start_list.append(idx_end)\n\n ner_token_ids.append(doc_token_ids_list[batch_idx])\n ner_token_masks.append(doc_token_masks_list[batch_idx])\n if need_label_flag:\n ner_token_labels.append(doc_token_labels_list[batch_idx])\n\n # [ner_batch_size, norm_sent_len]\n ner_token_ids = torch.cat(ner_token_ids, dim=0)\n ner_token_masks = torch.cat(ner_token_masks, dim=0)\n if need_label_flag:\n ner_token_labels = torch.cat(ner_token_labels, dim=0)\n\n # get ner output\n ner_token_emb, ner_loss, ner_token_preds = self.ner_model(\n ner_token_ids, ner_token_masks, label_ids=ner_token_labels,\n train_flag=train_flag, decode_flag=not use_gold_span,\n )\n\n if use_gold_span: # definitely use gold span info\n ner_token_types = ner_token_labels\n else:\n ner_token_types = ner_token_preds\n\n # get sentence embedding\n ner_sent_emb = self.get_batch_sent_emb(ner_token_emb, ner_token_masks, valid_sent_num_list)\n\n assert sum(valid_sent_num_list) == ner_token_emb.size(0) == ner_sent_emb.size(0)\n\n # followings are all lists of tensors\n doc_token_emb_list = []\n doc_token_masks_list = []\n doc_token_types_list = []\n doc_sent_emb_list = []\n doc_sent_loss_list = []\n for batch_idx in range(batch_size):\n idx_start = ner_batch_idx_start_list[batch_idx]\n idx_end = ner_batch_idx_start_list[batch_idx+1]\n doc_token_emb_list.append(ner_token_emb[idx_start:idx_end, :, :])\n doc_token_masks_list.append(ner_token_masks[idx_start:idx_end, :])\n doc_token_types_list.append(ner_token_types[idx_start:idx_end, :])\n doc_sent_emb_list.append(ner_sent_emb[idx_start:idx_end, :])\n if ner_loss is not None:\n # every doc_sent_loss.size is [valid_sent_num]\n doc_sent_loss_list.append(ner_loss[idx_start:idx_end])\n\n return doc_token_emb_list, doc_token_masks_list, doc_token_types_list, doc_sent_emb_list, doc_sent_loss_list\n\n def forward(self, doc_batch_dict, doc_features,\n use_gold_span=False, train_flag=True, heuristic_type='DCFEE-O',\n event_idx2entity_idx2field_idx=None, **kwargs):\n # DCFEE does not need scheduled sampling\n # get doc token-level local context\n doc_token_emb_list, doc_token_masks_list, doc_token_types_list, doc_sent_emb_list, doc_sent_loss_list = \\\n self.get_local_context_info(\n doc_batch_dict, train_flag=train_flag, use_gold_span=use_gold_span,\n )\n\n # get doc feature objects\n ex_idx_list = doc_batch_dict['ex_idx']\n doc_fea_list = [doc_features[ex_idx] for ex_idx in ex_idx_list]\n\n # get doc span-level info for event extraction\n 
doc_span_info_list = get_doc_span_info_list(doc_token_types_list, doc_fea_list, use_gold_span=use_gold_span)\n\n if train_flag:\n doc_event_loss_list = []\n for batch_idx, ex_idx in enumerate(ex_idx_list):\n doc_event_loss_list.append(\n self.get_loss_on_doc(\n doc_sent_emb_list[batch_idx],\n doc_fea_list[batch_idx],\n )\n )\n\n mix_loss = self.get_mix_loss(doc_sent_loss_list, doc_event_loss_list, doc_span_info_list)\n\n return mix_loss\n else:\n # return a list object may not be supported by torch.nn.parallel.DataParallel\n # ensure to run it under the single-gpu mode\n eval_results = []\n\n assert event_idx2entity_idx2field_idx is not None\n for batch_idx, ex_idx in enumerate(ex_idx_list):\n eval_results.append(\n self.heuristic_decode_on_doc(\n doc_sent_emb_list[batch_idx],\n doc_fea_list[batch_idx],\n doc_span_info_list[batch_idx],\n event_idx2entity_idx2field_idx,\n heuristic_type=heuristic_type,\n )\n )\n\n return eval_results\n\n def heuristic_decode_on_doc(self, doc_sent_emb, doc_fea, doc_span_info,\n event_idx2entity_idx2field_idx, heuristic_type='DCFEE-O'):\n # DCFEE-O: just produce One event per triggered sentence\n # DCFEE-M: produce Multiple potential events per triggered sentence\n support_heuristic_types = ['DCFEE-O', 'DCFEE-M']\n if heuristic_type not in support_heuristic_types:\n raise Exception('Unsupported heuristic type {}, pleasure choose from {}'.format(\n heuristic_type, str(support_heuristic_types)\n ))\n\n span_token_tup_list = doc_span_info.span_token_tup_list\n span_mention_range_list = doc_span_info.span_mention_range_list\n mention_drange_list = doc_span_info.mention_drange_list\n mention_type_list = doc_span_info.mention_type_list\n # heuristic decoding strategies will work on these span candidates\n event_idx2field_idx2span_token_tup2dranges = self.get_event_field_span_candidates(\n span_token_tup_list, span_mention_range_list, mention_drange_list,\n mention_type_list, event_idx2entity_idx2field_idx,\n )\n\n # if there is no extracted span, just directly return\n if len(span_token_tup_list) == 0:\n event_pred_list = []\n event_idx2obj_idx2field_idx2token_tup = [] # this term will be compared with ground-truth table contents\n for event_idx in range(len(self.event_type_fields_pairs)):\n event_pred_list.append(0)\n event_idx2obj_idx2field_idx2token_tup.append(None)\n\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, event_idx2field_idx2span_token_tup2dranges\n\n event_idx2key_sent_idx_list = []\n event_pred_list = []\n event_idx2obj_idx2field_idx2token_tup = []\n for event_idx, event_cls in enumerate(self.event_cls_layers):\n event_type, field_types = self.event_type_fields_pairs[event_idx]\n num_fields = len(field_types)\n field_idx2span_token_tup2dranges = event_idx2field_idx2span_token_tup2dranges[event_idx]\n\n # get key event sentence prediction\n doc_sent_logits = event_cls(doc_sent_emb) # [sent_num, 2]\n doc_sent_logp = F.log_softmax(doc_sent_logits, dim=-1) # [sent_num, 2]\n doc_sent_pred_list = doc_sent_logp.argmax(dim=-1).tolist()\n key_sent_idx_list = [\n sent_idx for sent_idx, sent_pred in enumerate(doc_sent_pred_list) if sent_pred == 1\n ]\n event_idx2key_sent_idx_list.append(key_sent_idx_list)\n\n if len(key_sent_idx_list) == 0:\n event_pred_list.append(0)\n event_idx2obj_idx2field_idx2token_tup.append(None)\n else:\n obj_idx2field_idx2token_tup = []\n for key_sent_idx in key_sent_idx_list:\n if heuristic_type == support_heuristic_types[0]:\n field_idx2token_tup = get_one_key_sent_event(\n 
key_sent_idx, num_fields, field_idx2span_token_tup2dranges\n )\n obj_idx2field_idx2token_tup.append(field_idx2token_tup)\n elif heuristic_type == support_heuristic_types[1]:\n field_idx2token_tup_list = get_many_key_sent_event(\n key_sent_idx, num_fields, field_idx2span_token_tup2dranges\n )\n obj_idx2field_idx2token_tup.extend(field_idx2token_tup_list)\n else:\n raise Exception('Unsupported heuristic type {}, pleasure choose from {}'.format(\n heuristic_type, str(support_heuristic_types)\n ))\n event_pred_list.append(1)\n event_idx2obj_idx2field_idx2token_tup.append(obj_idx2field_idx2token_tup)\n\n return doc_fea.ex_idx, event_pred_list, event_idx2obj_idx2field_idx2token_tup, \\\n doc_span_info, event_idx2field_idx2span_token_tup2dranges, event_idx2key_sent_idx_list\n\n def get_event_field_span_candidates(self, span_token_tup_list, span_mention_range_list,\n mention_drange_list, mention_type_list, event_idx2entity_idx2field_idx):\n # get mention idx -> span idx\n mention_span_idx_list = []\n for span_idx, (ment_idx_s, ment_idx_e) in enumerate(span_mention_range_list):\n mention_span_idx_list.extend([span_idx] * (ment_idx_e - ment_idx_s))\n assert len(mention_span_idx_list) == len(mention_drange_list)\n\n event_idx2field_idx2span_token_tup2dranges = {}\n for event_idx, (event_type, field_types) in enumerate(self.event_type_fields_pairs):\n # get the predefined entity idx to field idx mapping\n gold_entity_idx2field_idx = event_idx2entity_idx2field_idx[event_idx]\n\n # store field candidates for this doc\n field_idx2span_token_tup2dranges = {}\n for field_idx, _ in enumerate(field_types):\n field_idx2span_token_tup2dranges[field_idx] = {}\n\n # aggregate field candidates according to mention types\n for ment_idx, (ment_drange, ment_entity_idx) in enumerate(zip(mention_drange_list, mention_type_list)):\n if ment_entity_idx not in gold_entity_idx2field_idx:\n continue\n ment_field_idx = gold_entity_idx2field_idx[ment_entity_idx]\n if ment_field_idx is None:\n continue\n\n ment_span_idx = mention_span_idx_list[ment_idx]\n span_token_tup = span_token_tup_list[ment_span_idx]\n\n # because it is dict, so all modifications to the key will take effect in raw dict\n cur_span_token_tup2dranges = field_idx2span_token_tup2dranges[ment_field_idx]\n if span_token_tup not in cur_span_token_tup2dranges:\n cur_span_token_tup2dranges[span_token_tup] = []\n cur_span_token_tup2dranges[span_token_tup].append(ment_drange)\n\n event_idx2field_idx2span_token_tup2dranges[event_idx] = field_idx2span_token_tup2dranges\n\n return event_idx2field_idx2span_token_tup2dranges\n\n\ndef get_one_key_sent_event(key_sent_idx, num_fields, field_idx2span_token_tup2dranges):\n field_idx2token_tup = []\n for field_idx in range(num_fields):\n token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]\n\n # find the closest token_tup to the key sentence\n best_token_tup = None\n best_dist = 10000\n for token_tup, dranges in token_tup2dranges.items():\n for sent_idx, _, _ in dranges:\n cur_dist = abs(sent_idx - key_sent_idx)\n if cur_dist < best_dist:\n best_token_tup = token_tup\n best_dist = cur_dist\n\n field_idx2token_tup.append(best_token_tup)\n return field_idx2token_tup\n\n\ndef get_many_key_sent_event(key_sent_idx, num_fields, field_idx2span_token_tup2dranges):\n # get key_field_idx contained in key event sentence\n key_field_idx2token_tup_set = defaultdict(lambda: set())\n for field_idx, token_tup2dranges in field_idx2span_token_tup2dranges.items():\n assert field_idx < num_fields\n for token_tup, dranges in 
token_tup2dranges.items():\n for sent_idx, _, _ in dranges:\n if sent_idx == key_sent_idx:\n key_field_idx2token_tup_set[field_idx].add(token_tup)\n\n field_idx2token_tup_list = []\n while len(key_field_idx2token_tup_set) > 0:\n # get key token tup candidates according to the distance in the sentence\n prev_field_idx = None\n prev_token_cand = None\n key_field_idx2token_cand = {}\n for key_field_idx, token_tup_set in key_field_idx2token_tup_set.items():\n assert len(token_tup_set) > 0\n\n if prev_token_cand is None:\n best_token_tup = token_tup_set.pop()\n else:\n prev_char_range = field_idx2span_token_tup2dranges[prev_field_idx][prev_token_cand][0][1:]\n best_dist = 10000\n best_token_tup = None\n for token_tup in token_tup_set:\n cur_char_range = field_idx2span_token_tup2dranges[key_field_idx][token_tup][0][1:]\n cur_dist = min(\n abs(cur_char_range[1] - prev_char_range[0]),\n abs(cur_char_range[0] - prev_char_range[1])\n )\n if cur_dist < best_dist:\n best_dist = cur_dist\n best_token_tup = token_tup\n token_tup_set.remove(best_token_tup)\n\n key_field_idx2token_cand[key_field_idx] = best_token_tup\n prev_field_idx = key_field_idx\n prev_token_cand = best_token_tup\n\n field_idx2token_tup = []\n for field_idx in range(num_fields):\n token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]\n\n if field_idx in key_field_idx2token_tup_set:\n token_tup_set = key_field_idx2token_tup_set[field_idx]\n if len(token_tup_set) == 0:\n del key_field_idx2token_tup_set[field_idx]\n token_tup = key_field_idx2token_cand[field_idx]\n field_idx2token_tup.append(token_tup)\n else:\n # find the closest token_tup to the key sentence\n best_token_tup = None\n best_dist = 10000\n for token_tup, dranges in token_tup2dranges.items():\n for sent_idx, _, _ in dranges:\n cur_dist = abs(sent_idx - key_sent_idx)\n if cur_dist < best_dist:\n best_token_tup = token_tup\n best_dist = cur_dist\n\n field_idx2token_tup.append(best_token_tup)\n\n field_idx2token_tup_list.append(field_idx2token_tup)\n\n return field_idx2token_tup_list\n\n\n\n",
"# -*- coding: utf-8 -*-\n# AUTHOR: Shun Zheng\n# DATE: 19-9-19\n\nimport logging\nimport os\nimport torch.optim as optim\nimport torch.distributed as dist\nfrom itertools import product\n\nfrom .dee_helper import logger, DEEExample, DEEExampleLoader, DEEFeatureConverter, \\\n convert_dee_features_to_dataset, prepare_doc_batch_dict, measure_dee_prediction, \\\n decode_dump_template, eval_dump_template\nfrom .utils import BERTChineseCharacterTokenizer, default_dump_json, default_load_pkl\nfrom .ner_model import BertForBasicNER\nfrom .base_task import TaskSetting, BasePytorchTask\nfrom .event_type import event_type_fields_list\nfrom .dee_model import Doc2EDAGModel, DCFEEModel\n\n\nclass DEETaskSetting(TaskSetting):\n base_key_attrs = TaskSetting.base_key_attrs\n base_attr_default_pairs = [\n ('train_file_name', 'dev.json'),\n ('dev_file_name', 'dev.json'),\n ('test_file_name', 'test.json'),\n ('summary_dir_name', './tmp/Summary'),\n ('max_sent_len', 128),\n ('max_sent_num', 10),\n ('train_batch_size', 8),\n ('gradient_accumulation_steps', 4),\n ('eval_batch_size', 8),\n ('learning_rate', 1e-4),\n ('num_train_epochs', 100),\n ('no_cuda', False),\n ('local_rank', -1),\n ('seed', 99),\n ('optimize_on_cpu', False),\n ('fp16', False),\n ('use_bert', False), # whether to use bert as the encoder\n ('bert_model', 'bert-base-chinese'), # use which pretrained bert model\n ('only_master_logging', True), # whether to print logs from multiple processes\n ('resume_latest_cpt', True), # whether to resume latest checkpoints when training for fault tolerance\n ('cpt_file_name', 'Doc2EDAG'), # decide the identity of checkpoints, evaluation results, etc.\n ('model_type', 'Doc2EDAG'), # decide the model class used\n ('rearrange_sent', False), # whether to rearrange sentences\n ('use_crf_layer', True), # whether to use CRF Layer\n ('min_teacher_prob', 0.1), # the minimum prob to use gold spans\n ('schedule_epoch_start', 10), # from which epoch the scheduled sampling starts\n ('schedule_epoch_length', 10), # the number of epochs to linearly transit to the min_teacher_prob\n ('loss_lambda', 0.05), # the proportion of ner loss\n ('loss_gamma', 1.0), # the scaling proportion of missed span sentence ner loss\n ('add_greedy_dec', True), # whether to add additional greedy decoding\n ('use_token_role', True), # whether to use detailed token role\n ('seq_reduce_type', 'MaxPooling'), # use 'MaxPooling', 'MeanPooling' or 'AWA' to reduce a tensor sequence\n # network parameters (follow Bert Base)\n ('hidden_size', 768),\n ('dropout', 0.1),\n ('ff_size', 1024), # feed-forward mid layer size\n ('num_tf_layers', 4), # transformer layer number\n # ablation study parameters,\n ('use_path_mem', True), # whether to use the memory module when expanding paths\n ('use_scheduled_sampling', True), # whether to use the scheduled sampling\n ('use_doc_enc', True), # whether to use document-level entity encoding\n ('neg_field_loss_scaling', 3.0), # prefer FNs over FPs\n ]\n\n def __init__(self, **kwargs):\n super(DEETaskSetting, self).__init__(\n self.base_key_attrs, self.base_attr_default_pairs, **kwargs\n )\n\n\nclass DEETask(BasePytorchTask):\n \"\"\"Doc-level Event Extraction Task\"\"\"\n\n def __init__(self, dee_setting, load_train=True, load_dev=True, load_test=True,\n parallel_decorate=True):\n super(DEETask, self).__init__(dee_setting, only_master_logging=dee_setting.only_master_logging)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logging('Initializing {}'.format(self.__class__.__name__))\n\n self.tokenizer = 
BERTChineseCharacterTokenizer.from_pretrained(self.setting.bert_model)\n self.setting.vocab_size = len(self.tokenizer.vocab)\n\n # get entity and event label name\n self.entity_label_list = DEEExample.get_entity_label_list()\n self.event_type_fields_pairs = DEEExample.get_event_type_fields_pairs()\n self.example_loader_func = DEEExampleLoader(self.setting.rearrange_sent, self.setting.max_sent_len)\n\n # build feature converter\n if self.setting.use_bert:\n self.feature_converter_func = DEEFeatureConverter(\n self.entity_label_list, self.event_type_fields_pairs,\n self.setting.max_sent_len, self.setting.max_sent_num, self.tokenizer,\n include_cls=True, include_sep=True,\n )\n else:\n self.feature_converter_func = DEEFeatureConverter(\n self.entity_label_list, self.event_type_fields_pairs,\n self.setting.max_sent_len, self.setting.max_sent_num, self.tokenizer,\n include_cls=False, include_sep=False,\n )\n\n\n # load data\n self._load_data(\n self.example_loader_func, self.feature_converter_func, convert_dee_features_to_dataset,\n load_train=load_train, load_dev=load_dev, load_test=load_test,\n )\n # customized mini-batch producer\n self.custom_collate_fn = prepare_doc_batch_dict\n\n if not self.setting.use_token_role:\n # no token role conflicts with some settings\n assert self.setting.model_type == 'Doc2EDAG'\n assert self.setting.add_greedy_dec is False\n self.setting.num_entity_labels = 3 # 0: 'O', 1: 'Begin', 2: 'Inside'\n else:\n self.setting.num_entity_labels = len(self.entity_label_list)\n\n if self.setting.use_bert:\n ner_model = BertForBasicNER.from_pretrained(\n self.setting.bert_model, num_entity_labels = self.setting.num_entity_labels\n )\n self.setting.update_by_dict(ner_model.config.__dict__) # BertConfig dictionary\n\n # substitute pooler in bert to support distributed training\n # because unused parameters will cause errors when conducting distributed all_reduce\n class PseudoPooler(object):\n def __init__(self):\n pass\n\n def __call__(self, *x):\n return x\n del ner_model.bert.pooler\n ner_model.bert.pooler = PseudoPooler()\n else:\n ner_model = None\n\n if self.setting.model_type == 'Doc2EDAG':\n self.model = Doc2EDAGModel(\n self.setting, self.event_type_fields_pairs, ner_model=ner_model,\n )\n elif self.setting.model_type == 'DCFEE':\n self.model = DCFEEModel(\n self.setting, self.event_type_fields_pairs, ner_model=ner_model\n )\n else:\n raise Exception('Unsupported model type {}'.format(self.setting.model_type))\n\n self._decorate_model(parallel_decorate=parallel_decorate)\n\n # prepare optimizer\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.setting.learning_rate)\n\n # # resume option\n # if resume_model or resume_optimizer:\n # self.resume_checkpoint(resume_model=resume_model, resume_optimizer=resume_optimizer)\n\n self.min_teacher_prob = None\n self.teacher_norm = None\n self.teacher_cnt = None\n self.teacher_base = None\n self.reset_teacher_prob()\n\n self.logging('Successfully initialize {}'.format(self.__class__.__name__))\n\n def reset_teacher_prob(self):\n self.min_teacher_prob = self.setting.min_teacher_prob\n if self.train_dataset is None:\n # avoid crashing when not loading training data\n num_step_per_epoch = 500\n else:\n num_step_per_epoch = int(len(self.train_dataset) / self.setting.train_batch_size)\n self.teacher_norm = num_step_per_epoch * self.setting.schedule_epoch_length\n self.teacher_base = num_step_per_epoch * self.setting.schedule_epoch_start\n self.teacher_cnt = 0\n\n def get_teacher_prob(self, batch_inc_flag=True):\n if 
self.teacher_cnt < self.teacher_base:\n prob = 1\n else:\n prob = max(\n self.min_teacher_prob, (self.teacher_norm - self.teacher_cnt + self.teacher_base) / self.teacher_norm\n )\n\n if batch_inc_flag:\n self.teacher_cnt += 1\n\n return prob\n\n def get_event_idx2entity_idx2field_idx(self):\n entity_idx2entity_type = {}\n for entity_idx, entity_label in enumerate(self.entity_label_list):\n if entity_label == 'O':\n entity_type = entity_label\n else:\n entity_type = entity_label[2:]\n\n entity_idx2entity_type[entity_idx] = entity_type\n\n event_idx2entity_idx2field_idx = {}\n for event_idx, (event_name, field_types) in enumerate(self.event_type_fields_pairs):\n field_type2field_idx = {}\n for field_idx, field_type in enumerate(field_types):\n field_type2field_idx[field_type] = field_idx\n\n entity_idx2field_idx = {}\n for entity_idx, entity_type in entity_idx2entity_type.items():\n if entity_type in field_type2field_idx:\n entity_idx2field_idx[entity_idx] = field_type2field_idx[entity_type]\n else:\n entity_idx2field_idx[entity_idx] = None\n\n event_idx2entity_idx2field_idx[event_idx] = entity_idx2field_idx\n\n return event_idx2entity_idx2field_idx\n\n def get_loss_on_batch(self, doc_batch_dict, features=None):\n if features is None:\n features = self.train_features\n\n # teacher_prob = 1\n # if use_gold_span, gold spans will be used every time\n # else, teacher_prob will ensure the proportion of using gold spans\n if self.setting.use_scheduled_sampling:\n use_gold_span = False\n teacher_prob = self.get_teacher_prob()\n else:\n use_gold_span = True\n teacher_prob = 1\n\n try:\n loss = self.model(\n doc_batch_dict, features, use_gold_span=use_gold_span, train_flag=True, teacher_prob=teacher_prob\n )\n except Exception as e:\n print('-'*30)\n print('Exception occurs when processing ' +\n ','.join([features[ex_idx].guid for ex_idx in doc_batch_dict['ex_idx']]))\n raise Exception('Cannot get the loss')\n\n return loss\n\n def get_event_decode_result_on_batch(self, doc_batch_dict, features=None, use_gold_span=False, heuristic_type=None):\n if features is None:\n raise Exception('Features mush be provided')\n\n if heuristic_type is None:\n event_idx2entity_idx2field_idx = None\n else:\n # this mapping is used to get span candidates for each event field\n event_idx2entity_idx2field_idx = self.get_event_idx2entity_idx2field_idx()\n\n batch_eval_results = self.model(\n doc_batch_dict, features, use_gold_span=use_gold_span, train_flag=False,\n event_idx2entity_idx2field_idx=event_idx2entity_idx2field_idx, heuristic_type=heuristic_type,\n )\n\n return batch_eval_results\n\n def train(self, save_cpt_flag=True, resume_base_epoch=None):\n self.logging('=' * 20 + 'Start Training' + '=' * 20)\n self.reset_teacher_prob()\n\n # resume_base_epoch arguments have higher priority over settings\n if resume_base_epoch is None:\n # whether to resume latest cpt when restarting, very useful for preemptive scheduling clusters\n if self.setting.resume_latest_cpt:\n resume_base_epoch = self.get_latest_cpt_epoch()\n else:\n resume_base_epoch = 0\n\n # resume cpt if possible\n if resume_base_epoch > 0:\n self.logging('Training starts from epoch {}'.format(resume_base_epoch))\n for _ in range(resume_base_epoch):\n self.get_teacher_prob()\n self.resume_cpt_at(resume_base_epoch, resume_model=True, resume_optimizer=True)\n else:\n self.logging('Training starts from scratch')\n\n self.base_train(\n DEETask.get_loss_on_batch,\n kwargs_dict1={},\n epoch_eval_func=DEETask.resume_save_eval_at,\n kwargs_dict2={\n 'save_cpt_flag': 
save_cpt_flag,\n 'resume_cpt_flag': False,\n },\n base_epoch_idx=resume_base_epoch,\n )\n\n def resume_save_eval_at(self, epoch, resume_cpt_flag=False, save_cpt_flag=True):\n if self.is_master_node():\n print('\\nPROGRESS: {:.2f}%\\n'.format(epoch / self.setting.num_train_epochs * 100))\n self.logging('Current teacher prob {}'.format(self.get_teacher_prob(batch_inc_flag=False)))\n\n if resume_cpt_flag:\n self.resume_cpt_at(epoch)\n\n if self.is_master_node() and save_cpt_flag:\n self.save_cpt_at(epoch)\n\n if self.setting.model_type == 'DCFEE':\n eval_tasks = product(['dev', 'test'], [False, True], ['DCFEE-O', 'DCFEE-M'])\n else:\n if self.setting.add_greedy_dec:\n eval_tasks = product(['dev', 'test'], [False, True], ['GreedyDec', None])\n else:\n eval_tasks = product(['dev', 'test'], [False, True], [None])\n\n for task_idx, (data_type, gold_span_flag, heuristic_type) in enumerate(eval_tasks):\n if self.in_distributed_mode() and task_idx % dist.get_world_size() != dist.get_rank():\n continue\n\n if data_type == 'test':\n features = self.test_features\n dataset = self.test_dataset\n elif data_type == 'dev':\n features = self.dev_features\n dataset = self.dev_dataset\n else:\n raise Exception('Unsupported data type {}'.format(data_type))\n\n if gold_span_flag:\n span_str = 'gold_span'\n else:\n span_str = 'pred_span'\n\n if heuristic_type is None:\n # store user-provided name\n model_str = self.setting.cpt_file_name.replace('.', '~')\n else:\n model_str = heuristic_type\n\n decode_dump_name = decode_dump_template.format(data_type, span_str, model_str, epoch)\n eval_dump_name = eval_dump_template.format(data_type, span_str, model_str, epoch)\n self.eval(features, dataset, use_gold_span=gold_span_flag, heuristic_type=heuristic_type,\n dump_decode_pkl_name=decode_dump_name, dump_eval_json_name=eval_dump_name)\n\n def save_cpt_at(self, epoch):\n self.save_checkpoint(cpt_file_name='{}.cpt.{}'.format(self.setting.cpt_file_name, epoch), epoch=epoch)\n\n def resume_cpt_at(self, epoch, resume_model=True, resume_optimizer=False):\n self.resume_checkpoint(cpt_file_name='{}.cpt.{}'.format(self.setting.cpt_file_name, epoch),\n resume_model=resume_model, resume_optimizer=resume_optimizer)\n\n def get_latest_cpt_epoch(self):\n prev_epochs = []\n for fn in os.listdir(self.setting.model_dir):\n if fn.startswith('{}.cpt'.format(self.setting.cpt_file_name)):\n try:\n epoch = int(fn.split('.')[-1])\n prev_epochs.append(epoch)\n except Exception as e:\n continue\n prev_epochs.sort()\n\n if len(prev_epochs) > 0:\n latest_epoch = prev_epochs[-1]\n self.logging('Pick latest epoch {} from {}'.format(latest_epoch, str(prev_epochs)))\n else:\n latest_epoch = 0\n self.logging('No previous epoch checkpoints, just start from scratch')\n\n return latest_epoch\n\n def eval(self, features, dataset, use_gold_span=False, heuristic_type=None,\n dump_decode_pkl_name=None, dump_eval_json_name=None):\n self.logging('=' * 20 + 'Start Evaluation' + '=' * 20)\n\n if dump_decode_pkl_name is not None:\n dump_decode_pkl_path = os.path.join(self.setting.output_dir, dump_decode_pkl_name)\n self.logging('Dumping decode results into {}'.format(dump_decode_pkl_name))\n else:\n dump_decode_pkl_path = None\n\n total_event_decode_results = self.base_eval(\n dataset, DEETask.get_event_decode_result_on_batch,\n reduce_info_type='none', dump_pkl_path=dump_decode_pkl_path,\n features=features, use_gold_span=use_gold_span, heuristic_type=heuristic_type,\n )\n\n self.logging('Measure DEE Prediction')\n\n if dump_eval_json_name is not None:\n 
dump_eval_json_path = os.path.join(self.setting.output_dir, dump_eval_json_name)\n self.logging('Dumping eval results into {}'.format(dump_eval_json_name))\n else:\n dump_eval_json_path = None\n\n total_eval_res = measure_dee_prediction(\n self.event_type_fields_pairs, features, total_event_decode_results,\n dump_json_path=dump_eval_json_path\n )\n\n return total_event_decode_results, total_eval_res\n\n def reevaluate_dee_prediction(self, target_file_pre='dee_eval', target_file_suffix='.pkl',\n dump_flag=False):\n \"\"\"Enumerate the evaluation directory to collect all dumped evaluation results\"\"\"\n eval_dir_path = self.setting.output_dir\n logger.info('Re-evaluate dee predictions from {}'.format(eval_dir_path))\n data_span_type2model_str2epoch_res_list = {}\n for fn in os.listdir(eval_dir_path):\n fn_splits = fn.split('.')\n if fn.startswith(target_file_pre) and fn.endswith(target_file_suffix) and len(fn_splits) == 6:\n _, data_type, span_type, model_str, epoch, _ = fn_splits\n\n data_span_type = (data_type, span_type)\n if data_span_type not in data_span_type2model_str2epoch_res_list:\n data_span_type2model_str2epoch_res_list[data_span_type] = {}\n model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[data_span_type]\n\n if model_str not in model_str2epoch_res_list:\n model_str2epoch_res_list[model_str] = []\n epoch_res_list = model_str2epoch_res_list[model_str]\n\n if data_type == 'dev':\n features = self.dev_features\n elif data_type == 'test':\n features = self.test_features\n else:\n raise Exception('Unsupported data type {}'.format(data_type))\n\n epoch = int(epoch)\n fp = os.path.join(eval_dir_path, fn)\n self.logging('Re-evaluating {}'.format(fp))\n event_decode_results = default_load_pkl(fp)\n total_eval_res = measure_dee_prediction(\n event_type_fields_list, features, event_decode_results\n )\n\n if dump_flag:\n fp = fp.rstrip('.pkl') + '.json'\n self.logging('Dumping {}'.format(fp))\n default_dump_json(total_eval_res, fp)\n\n epoch_res_list.append((epoch, total_eval_res))\n\n for data_span_type, model_str2epoch_res_list in data_span_type2model_str2epoch_res_list.items():\n for model_str, epoch_res_list in model_str2epoch_res_list.items():\n epoch_res_list.sort(key=lambda x: x[0])\n\n return data_span_type2model_str2epoch_res_list\n\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.nll_loss",
"torch.zeros",
"torch.cat",
"torch.Tensor",
"torch.nn.functional.log_softmax",
"torch.nn.functional.cross_entropy",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.arange",
"torch.stack"
],
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
]
] |
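A note on the scheduled-sampling code in the Doc2EDAG/DCFEE task file above: get_teacher_prob keeps the gold-span probability at 1 for the first schedule_epoch_start epochs and then decays it linearly to min_teacher_prob over schedule_epoch_length epochs. A minimal standalone sketch of that schedule (the helper name and the print loop are mine; the hyper-parameter values are the DEETaskSetting defaults shown above, with the 500-steps-per-epoch fallback):

def teacher_prob(step, steps_per_epoch, schedule_epoch_start,
                 schedule_epoch_length, min_teacher_prob):
    # probability of using gold spans at a given global training step
    base = steps_per_epoch * schedule_epoch_start   # warm-up steps held at prob 1.0
    norm = steps_per_epoch * schedule_epoch_length  # length of the linear decay
    if step < base:
        return 1.0
    return max(min_teacher_prob, (norm - step + base) / norm)

steps_per_epoch = 500  # fallback used when no training data is loaded
for epoch in (0, 10, 15, 20, 30):
    print(epoch, teacher_prob(epoch * steps_per_epoch, steps_per_epoch, 10, 10, 0.1))
# 0 1.0, 10 1.0, 15 0.5, 20 0.1, 30 0.1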
jensmcatanho/data-driven_astronomy
|
[
"46c32361378421079a7114e96c3c0fe8451748bb"
] |
[
"week1/1b/2_mean_of_a_set_of_signals/program.py"
] |
[
"import numpy as np\n\ndef mean_datasets(files):\n datasets = []\n\n for file in files:\n datasets.append(np.loadtxt(file, delimiter=','))\n\n result = np.mean(datasets, axis=0)\n return np.round(result, decimals=1)\n\nif __name__ == '__main__':\n # Test Case 1\n print(mean_datasets(['data1.csv', 'data2.csv', 'data3.csv']))\n\n # Test Case 2\n print(mean_datasets(['data4.csv', 'data5.csv', 'data6.csv']))\n"
] |
[
[
"numpy.round",
"numpy.mean",
"numpy.loadtxt"
]
] |
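mean_datasets above reads each CSV with np.loadtxt, stacks the arrays, and averages them element-wise with np.mean(..., axis=0) before rounding to one decimal. A quick self-contained check of that behaviour (in-memory CSVs stand in for the data1.csv/data2.csv files, which are not part of the snippet):

import io
import numpy as np

csv_a = io.StringIO("1.0,2.0\n3.0,4.0")
csv_b = io.StringIO("2.0,4.0\n6.0,8.0")

# same steps as mean_datasets: load, average along axis 0, round to 1 decimal
datasets = [np.loadtxt(f, delimiter=',') for f in (csv_a, csv_b)]
print(np.round(np.mean(datasets, axis=0), decimals=1))
# [[1.5 3. ]
#  [4.5 6. ]]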
kkkkeeee/MinkowskiEngineM
|
[
"456a8f12aa89449d75369760d9f0398a5a2e14cd"
] |
[
"MinkowskiEngine/SparseTensor.py"
] |
[
"# Copyright (c) Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport os\nimport warnings\nimport torch\nimport copy\nfrom enum import Enum\nfrom typing import Union\nfrom collections import Sequence\nimport numpy as np\n\nfrom Common import convert_to_int_list\nfrom MinkowskiCoords import CoordsKey, CoordsManager\nimport MinkowskiEngineBackend as MEB\nfrom MinkowskiEngineBackend import MemoryManagerBackend\n\n\nclass SparseTensorOperationMode(Enum):\n \"\"\"\n `SEPARATE_COORDS_MANAGER`: always create a new coordinate manager.\n `SHARE_COORDS_MANAGER`: always use the globally defined coordinate manager. Must clear the coordinate manager manually by :attr:`MinkowskiEngine.SparseTensor.clear_global_coords_man`\n \"\"\"\n SEPARATE_COORDS_MANAGER = 0\n SHARE_COORDS_MANAGER = 1\n\n\nclass SparseTensorQuantizationMode(Enum):\n \"\"\"\n `RANDOM_SUBSAMPLE`: Subsample one coordinate per each quantization block randomly.\n `UNWEIGHTED_AVERAGE`: average all features within a quantization block equally.\n \"\"\"\n RANDOM_SUBSAMPLE = 0\n UNWEIGHTED_AVERAGE = 1\n\n\n_sparse_tensor_operation_mode = SparseTensorOperationMode.SEPARATE_COORDS_MANAGER\n_global_coords_man = None\nCOORDS_MAN_DIFFERENT_ERROR = \"SparseTensors must share the same coordinate manager for this operation. Please refer to the SparseTensor creation API (https://stanfordvl.github.io/MinkowskiEngine/sparse_tensor.html) to share the coordinate manager, or set the sparse tensor operation mode with `set_sparse_tensor_operation_mode` to share it by default.\"\nCOORDS_KEY_DIFFERENT_ERROR = \"SparseTensors must have the same coords_key.\"\n\n\ndef set_sparse_tensor_operation_mode(operation_mode: SparseTensorOperationMode):\n r\"\"\"Define the sparse tensor coordinate manager operation mode.\n\n By default, a :attr:`MinkowskiEngine.SparseTensor.SparseTensor`\n instantiation creates a new coordinate manager that is not shared with\n other sparse tensors. By setting this function with\n :attr:`MinkowskiEngine.SparseTensorOperationMode.SHARE_COORDS_MANAGER`, you\n can share the coordinate manager globally with other sparse tensors.\n However, you must explicitly clear the coordinate manger after use. 
Please\n refer to :attr:`MinkowskiEngine.clear_global_coords_man`.\n\n Args:\n :attr:`operation_mode`\n (:attr:`MinkowskiEngine.SparseTensorOperationMode`): The operation mode\n for the sparse tensor coordinate manager. By default\n :attr:`MinkowskiEngine.SparseTensorOperationMode.SEPARATE_COORDS_MANAGER`.\n\n Example:\n\n >>> import MinkowskiEngine as ME\n >>> ME.set_sparse_tensor_operation_mode(ME.SparseTensorOperationMode.SHARE_COORDS_MANAGER)\n >>> ...\n >>> a = ME.SparseTensor(coords=A_C, feats=A_F)\n >>> b = ME.SparseTensor(coords=B_C, feats=B_C) # coords_man shared\n >>> ... # one feed forward and backward\n >>> ME.clear_global_coords_man() # Must use to clear the coordinates after one forward/backward\n\n \"\"\"\n assert isinstance(operation_mode, SparseTensorOperationMode), \\\n f\"Input must be an instance of SparseTensorOperationMode not {operation_mode}\"\n global _sparse_tensor_operation_mode\n _sparse_tensor_operation_mode = operation_mode\n\n\ndef sparse_tensor_operation_mode():\n global _sparse_tensor_operation_mode\n return copy.deepcopy(_sparse_tensor_operation_mode)\n\n\ndef clear_global_coords_man():\n r\"\"\"Clear the global coordinate manager cache.\n\n When you use the operation mode:\n :attr:`MinkowskiEngine.SparseTensor.SparseTensorOperationMode.SHARE_COORDS_MANAGER`,\n you must explicitly clear the coordinate manager after each feed forward/backward.\n \"\"\"\n global _global_coords_man\n _global_coords_man = None\n\n\nclass SparseTensor():\n r\"\"\"A sparse tensor class. Can be accessed via\n :attr:`MinkowskiEngine.SparseTensor`.\n\n The :attr:`SparseTensor` class is the basic tensor in MinkowskiEngine. For\n the definition of a sparse tensor, please visit `the terminology page\n <https://stanfordvl.github.io/MinkowskiEngine/terminology.html#sparse-tensor>`_.\n We use the COOrdinate (COO) format to save a sparse tensor `[1]\n <http://groups.csail.mit.edu/commit/papers/2016/parker-thesis.pdf>`_. This\n representation is simply a concatenation of coordinates in a matrix\n :math:`C` and associated features :math:`F`.\n\n .. math::\n\n \\mathbf{C} = \\begin{bmatrix}\n b_1 & x_1^1 & x_1^2 & \\cdots & x_1^D \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\\n b_N & x_N^1 & x_N^2 & \\cdots & x_N^D\n \\end{bmatrix}, \\; \\mathbf{F} = \\begin{bmatrix}\n \\mathbf{f}_1^T\\\\\n \\vdots\\\\\n \\mathbf{f}_N^T\n \\end{bmatrix}\n\n where :math:`\\mathbf{x}_i \\in \\mathcal{Z}^D` is a :math:`D`-dimensional\n coordinate and :math:`b_i \\in \\mathcal{Z}_+` denotes the corresponding\n batch index. :math:`N` is the number of non-zero elements in the sparse\n tensor, each with the coordinate :math:`(b_i, x_i^1, x_i^1, \\cdots,\n x_i^D)`, and the associated feature :math:`\\mathbf{f}_i`. Internally, we\n handle the batch index as an additional spatial dimension.\n\n .. warning::\n\n Before MinkowskiEngine version 0.4, we put the batch indices on the last\n column. Thus, direct manipulation of coordinates will be incompatible\n with the latest versions. Instead, please use\n :attr:`MinkowskiEngine.utils.batched_coordinates` or\n :attr:`MinkowskiEngine.utils.sparse_collate` to create batched\n coordinates.\n\n Also, to access coordinates or features batch-wise, use the functions\n :attr:`coordinates_at(batch_index : int)`, :attr:`features_at(batch_index : int)` of\n a sparse tensor. 
Or to access all batch-wise coordinates and features,\n `decomposed_coordinates`, `decomposed_features`,\n `decomposed_coordinates_and_features` of a sparse tensor.\n\n Example::\n\n >>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])\n >>> A = ME.SparseTensor(feats=feats, coords=coords)\n >>> coords_batch0 = A.coordinates_at(batch_index=0)\n >>> feats_batch1 = A.features_at(batch_index=1)\n >>> list_of_coords, list_of_featurs = A.decomposed_coordinates_and_features\n\n \"\"\"\n\n def __init__(\n self,\n feats,\n coords=None,\n coords_key=None,\n coords_manager=None,\n force_creation=False,\n allow_duplicate_coords=False,\n quantization_mode=SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,\n memory_manager_backend: MemoryManagerBackend = None,\n tensor_stride=1):\n r\"\"\"\n\n Args:\n :attr:`feats` (:attr:`torch.FloatTensor`,\n :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or\n :attr:`torch.cuda.DoubleTensor`): The features of the sparse\n tensor.\n\n :attr:`coords` (:attr:`torch.IntTensor`): The coordinates\n associated to the features. If not provided, :attr:`coords_key`\n must be provided.\n\n :attr:`coords_key` (:attr:`MinkowskiEngine.CoordsKey`): When the\n coordinates are already cached in the MinkowskiEngine, we could\n reuse the same coordinates by simply providing the coordinate hash\n key. In most case, this process is done automatically. When you\n provide a `coords_key`, all other arguments will be be ignored.\n\n :attr:`coords_manager` (:attr:`MinkowskiEngine.CoordsManager`): The\n MinkowskiEngine creates a dynamic computation graph and all\n coordinates inside the same computation graph are managed by a\n CoordsManager object. If not provided, the MinkowskiEngine will\n create a new computation graph. In most cases, this process is\n handled automatically and you do not need to use this. When you use\n it, make sure you understand what you are doing.\n\n :attr:`force_creation` (:attr:`bool`): Force creation of the\n coordinates. This allows generating a new set of coordinates even\n when there exists another set of coordinates with the same\n tensor stride. This could happen when you manually feed the same\n :attr:`coords_manager`.\n\n :attr:`allow_duplicate_coords` (:attr:`bool`): Allow duplicate\n coordinates when creating the sparse tensor. Internally, it will\n generate a new unique set of coordinates and use features of at the\n corresponding unique coordinates. In general, setting\n `allow_duplicate_coords=True` is not recommended as it could hide\n obvious errors in your data loading and preprocessing steps. Please\n refer to the quantization and data loading tutorial on `here\n <https://stanfordvl.github.io/MinkowskiEngine/demo/training.html>`_\n for more details.\n\n :attr:`quantizatino_mode`\n (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines the\n quantization method and how to define features of a sparse tensor.\n Please refer to :attr:`SparseTensorQuantizationMode` for details.\n\n :attr:`tensor_stride` (:attr:`int`, :attr:`list`,\n :attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride\n of the current sparse tensor. 
By default, it is 1.\n\n \"\"\"\n assert isinstance(feats,\n torch.Tensor), \"Features must be a torch.Tensor\"\n assert feats.ndim == 2, f\"The feature should be a matrix, The input feature is an order-{feats.ndim} tensor.\"\n assert isinstance(quantization_mode, SparseTensorQuantizationMode)\n self.quantization_mode = quantization_mode\n\n if coords is None and coords_key is None:\n raise ValueError('Either coords or coords_key must be provided')\n\n if coords_key is None:\n assert coords_manager is not None or coords is not None\n D = -1\n if coords_manager is None:\n D = coords.size(1) - 1\n else:\n D = coords_manager.D\n coords_key = CoordsKey(D)\n coords_key.setTensorStride(convert_to_int_list(tensor_stride, D))\n else:\n assert isinstance(coords_key, CoordsKey)\n\n if coords is not None:\n assert isinstance(coords, torch.Tensor), \\\n \"Coordinate must be of type torch.Tensor\"\n\n if not isinstance(coords, torch.IntTensor):\n warnings.warn(\n 'Coords implicitly converted to torch.IntTensor. ' +\n 'To remove this warning, use `.int()` to convert the ' +\n 'coords into an torch.IntTensor')\n coords = torch.floor(coords).int()\n\n if coords.device.type != 'cpu':\n warnings.warn(\n 'Coords implicitly converted to CPU type. ' +\n 'To remove this warning, use `.cpu()` to convert the ' +\n 'coords into a CPU type')\n coords = coords.cpu()\n\n assert feats.shape[0] == coords.shape[0], \\\n \"The number of rows in features and coordinates do not match.\"\n\n coords = coords.contiguous()\n\n ##########################\n # Setup CoordsManager\n ##########################\n if coords_manager is None:\n # If set to share the coords man, use the global coords man\n global _sparse_tensor_operation_mode, _global_coords_man\n if _sparse_tensor_operation_mode == SparseTensorOperationMode.SHARE_COORDS_MANAGER:\n if _global_coords_man is None:\n _global_coords_man = CoordsManager(\n memory_manager_backend=memory_manager_backend,\n D=coords.size(1) - 1)\n coords_manager = _global_coords_man\n else:\n assert coords is not None, \"Initial coordinates must be given\"\n coords_manager = CoordsManager(D=coords.size(1) - 1)\n\n else:\n assert isinstance(coords_manager, CoordsManager)\n\n ##########################\n # Initialize coords\n ##########################\n if not coords_key.isKeySet() and coords is not None and len(coords) > 0:\n if quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:\n force_remap = True\n return_inverse = False\n elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:\n force_remap = True\n return_inverse = True\n\n self.unique_index, self.inverse_mapping = coords_manager.initialize(\n coords,\n coords_key,\n force_creation=force_creation,\n force_remap=force_remap,\n allow_duplicate_coords=allow_duplicate_coords,\n return_inverse=return_inverse)\n\n if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:\n self._CF = feats\n self._CC = coords\n feats = MEB.quantization_average_features(\n feats, torch.arange(len(feats)), self.inverse_mapping,\n len(self.unique_index), 0)\n coords = coords[self.unique_index]\n elif force_remap:\n assert len(self.unique_index) > 0\n self._CC = coords\n self._CF = feats\n coords = coords[self.unique_index]\n feats = feats[self.unique_index]\n\n elif coords is not None: # empty / invalid coords\n assert isinstance(coords, torch.IntTensor)\n assert coords.ndim == 2\n coords_manager.initialize(\n coords,\n coords_key,\n force_creation=force_creation,\n force_remap=False,\n 
allow_duplicate_coords=False,\n return_inverse=False)\n elif coords_key is not None:\n assert coords_key.isKeySet()\n\n self._F = feats.contiguous()\n self._C = coords\n self.coords_key = coords_key\n self.coords_man = coords_manager\n\n @property\n def tensor_stride(self):\n return self.coords_key.getTensorStride()\n\n @tensor_stride.setter\n def tensor_stride(self, p):\n r\"\"\"\n This function is not recommended to be used directly.\n \"\"\"\n p = convert_to_int_list(p, self.D)\n self.coords_key.setTensorStride(p)\n\n def _get_coords(self):\n return self.coords_man.get_coords(self.coords_key)\n\n @property\n def C(self):\n r\"\"\"The alias of :attr:`coords`.\n \"\"\"\n return self.coords\n\n @property\n def coords(self):\n r\"\"\"\n The coordinates of the current sparse tensor. The coordinates are\n represented as a :math:`N \\times (D + 1)` dimensional matrix where\n :math:`N` is the number of points in the space and :math:`D` is the\n dimension of the space (e.g. 3 for 3D, 4 for 3D + Time). Additional\n dimension of the column of the matrix C is for batch indices which is\n internally treated as an additional spatial dimension to disassociate\n different instances in a batch.\n \"\"\"\n if self._C is None:\n self._C = self._get_coords()\n return self._C\n\n @property\n def decomposed_coordinates(self):\n r\"\"\"Returns a list of coordinates per batch.\n\n Returns a list of torch.IntTensor :math:`C \\in \\mathcal{R}^{N_i\n \\times D}` coordinates per batch where :math:`N_i` is the number of non\n zero elements in the :math:`i`th batch index in :math:`D` dimensional\n space.\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self.C[row_inds, 1:] for row_inds in row_inds_list]\n\n def coordinates_at(self, batch_index):\n r\"\"\"Return coordinates at the specified batch index.\n\n Returns a torch.IntTensor :math:`C \\in \\mathcal{R}^{N_i\n \\times D}` coordinates at the specified batch index where :math:`N_i`\n is the number of non zero elements in the :math:`i`th batch index in\n :math:`D` dimensional space.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self.C[row_inds, 1:]\n\n @property\n def F(self):\n r\"\"\"The alias of :attr:`feats`.\n \"\"\"\n return self._F\n\n @property\n def feats(self):\n r\"\"\"\n The features of the current sparse tensor. The features are :math:`N\n \\times D_F` where :math:`N` is the number of points in the space and\n :math:`D_F` is the dimension of each feature vector. 
Please refer to\n :attr:`coords` to access the associated coordinates.\n \"\"\"\n return self._F\n\n @property\n def decomposed_features(self):\n r\"\"\"Returns a list of features per batch.\n\n Returns a list of torch.Tensor :math:`C \\in \\mathcal{R}^{N_i\n \\times N_F}` features per batch where :math:`N_i` is the number of non\n zero elements in the :math:`i`th batch index in :math:`D` dimensional\n space.\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self._F[row_inds] for row_inds in row_inds_list]\n\n def features_at(self, batch_index):\n r\"\"\"Returns a feature matrix at the specified batch index.\n\n Returns a torch.Tensor :math:`C \\in \\mathcal{R}^{N\n \\times N_F}` feature matrix :math:`N` is the number of non\n zero elements in the specified batch index and :math:`N_F` is the\n number of channels.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self._F[row_inds]\n\n def coordinates_and_features_at(self, batch_index):\n r\"\"\"Returns a coordinate and feature matrix at the specified batch index.\n\n Returns a coordinate and feature matrix at the specified `batch_index`.\n The coordinate matrix is a torch.IntTensor :math:`C \\in \\mathcal{R}^{N\n \\times D}` where :math:`N` is the number of non zero elements in the\n specified batch index in :math:`D` dimensional space. The feature\n matrix is a torch.Tensor :math:`C \\in \\mathcal{R}^{N \\times N_F}`\n matrix :math:`N` is the number of non zero elements in the specified\n batch index and :math:`N_F` is the number of channels.\n \"\"\"\n row_inds = self.coords_man.get_row_indices_at(self.coords_key,\n batch_index)\n return self.C[row_inds, 1:], self._F[row_inds]\n\n @property\n def decomposed_coordinates_and_features(self):\n r\"\"\"Returns a list of coordinates and a list of features per batch.abs\n\n \"\"\"\n row_inds_list = self.coords_man.get_row_indices_per_batch(\n self.coords_key)\n return [self.C[row_inds, 1:] for row_inds in row_inds_list], \\\n [self._F[row_inds] for row_inds in row_inds_list]\n\n @property\n def D(self):\n r\"\"\"\n The spatial dimension of the sparse tensor. 
This is equal to the number\n of columns of :attr:`C` minus 1.\n \"\"\"\n return self.coords_key.D\n\n @property\n def dimension(self):\n r\"\"\"Alias of attr:`D`\n \"\"\"\n return self.D\n\n @property\n def requires_grad(self):\n return self._F.requires_grad\n\n def requires_grad_(self, requires_grad: bool = True):\n self._F.requires_grad_(requires_grad)\n\n def float(self):\n self._F = self._F.float()\n\n def double(self):\n self._F = self._F.double()\n\n def set_tensor_stride(self, s):\n ss = convert_to_int_list(s, self.D)\n self.coords_key.setTensorStride(ss)\n\n def __repr__(self):\n return self.__class__.__name__ + '(' + os.linesep \\\n + ' Coords=' + str(self.C) + os.linesep \\\n + ' Feats=' + str(self.F) + os.linesep \\\n + ' coords_key=' + str(self.coords_key) \\\n + ' tensor_stride=' + str(self.coords_key.getTensorStride()) + os.linesep \\\n + ' coords_man=' + str(self.coords_man) \\\n + ' spatial dimension=' + str(self.D) + ')'\n\n def __len__(self):\n return len(self._F)\n\n def size(self):\n return self._F.size()\n\n @property\n def shape(self):\n return self._F.shape\n\n def to(self, device):\n self._F = self._F.to(device)\n return self\n\n def cpu(self):\n self._F = self._F.cpu()\n return self\n\n @property\n def device(self):\n return self._F.device\n\n @property\n def dtype(self):\n return self._F.dtype\n\n def get_device(self):\n return self._F.get_device()\n\n # Operation overloading\n def __iadd__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F += other.F\n return self\n\n def __isub__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F -= other.F\n return self\n\n def __imul__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F *= other.F\n return self\n\n def __idiv__(self, other):\n assert isinstance(other, SparseTensor)\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n assert self.coords_key == other.coords_key, COORDS_KEY_DIFFERENT_ERROR\n\n self._F /= other.F\n return self\n\n def __add__(self, other):\n r\"\"\"\n Add its feature with the corresponding feature of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. 
For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F + other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] += other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F + other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __sub__(self, other):\n r\"\"\"\n Subtract the feature of the other :attr:`MinkowskiEngine.SparseTensor`\n or a :attr:`torch.Tensor` from its corresponding feature element-wise.\n For coordinates that exist on one sparse tensor but not on the other,\n features of the counterpart that do not exist will be set to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F - other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] -= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F - other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __mul__(self, other):\n r\"\"\"\n Multiply its feature of with the corresponding feature of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. 
For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F * other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] *= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F * other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __truediv__(self, other):\n r\"\"\"\n Divide its feature by the corresponding feature of the other\n :attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`\n element-wise. For coordinates that exist on one sparse tensor but not\n on the other, features of the counterpart that do not exist will be set\n to 0.\n \"\"\"\n assert isinstance(other, (SparseTensor, torch.Tensor))\n if isinstance(other, SparseTensor):\n assert self.coords_man == other.coords_man, COORDS_MAN_DIFFERENT_ERROR\n\n if self.coords_key == other.coords_key:\n return SparseTensor(\n self._F / other.F,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n else:\n # Generate union maps\n out_key = CoordsKey(self.coords_man.D)\n ins, outs = self.coords_man.get_union_map(\n (self.coords_key, other.coords_key), out_key)\n N_out = self.coords_man.get_coords_size_by_coords_key(out_key)\n out_F = torch.zeros((N_out, self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n out_F[outs[0]] = self._F[ins[0]]\n out_F[outs[1]] /= other._F[ins[1]]\n return SparseTensor(\n out_F, coords_key=out_key, coords_manager=self.coords_man)\n else: # when it is a torch.Tensor\n return SparseTensor(\n self._F / other,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n def __power__(self, power):\n return SparseTensor(\n self._F**power,\n coords_key=self.coords_key,\n coords_manager=self.coords_man)\n\n # Conversion functions\n def sparse(self, min_coords=None, max_coords=None, contract_coords=True):\n r\"\"\"Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch sparse\n tensor.\n\n Args:\n :attr:`min_coords` (torch.IntTensor, optional): The min\n coordinates of the output sparse tensor. Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`max_coords` (torch.IntTensor, optional): The max coordinates\n of the output sparse tensor (inclusive). Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`contract_coords` (bool, optional): Given True, the output\n coordinates will be divided by the tensor stride to make features\n contiguous.\n\n Returns:\n :attr:`spare_tensor` (torch.sparse.Tensor): the torch sparse tensor\n representation of the self in `[Batch Dim, Spatial Dims..., Feature\n Dim]`. 
The coordinate of each feature can be accessed via\n `min_coord + tensor_stride * [the coordinate of the dense tensor]`.\n\n :attr:`min_coords` (torch.IntTensor): the D-dimensional vector\n defining the minimum coordinate of the output sparse tensor. If\n :attr:`contract_coords` is True, the :attr:`min_coords` will also\n be contracted.\n\n :attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector\n defining the stride between tensor elements.\n\n \"\"\"\n\n if min_coords is not None:\n assert isinstance(min_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n if max_coords is not None:\n assert isinstance(max_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n\n def torch_sparse_Tensor(coords, feats, size=None):\n if size is None:\n if feats.dtype == torch.float64:\n return torch.sparse.DoubleTensor(coords, feats)\n elif feats.dtype == torch.float32:\n return torch.sparse.FloatTensor(coords, feats)\n else:\n raise ValueError('Feature type not supported.')\n else:\n if feats.dtype == torch.float64:\n return torch.sparse.DoubleTensor(coords, feats, size)\n elif feats.dtype == torch.float32:\n return torch.sparse.FloatTensor(coords, feats, size)\n else:\n raise ValueError('Feature type not supported.')\n\n # Use int tensor for all operations\n tensor_stride = torch.IntTensor(self.tensor_stride)\n\n # New coordinates\n coords = self.C\n coords, batch_indices = coords[:, 1:], coords[:, 0]\n\n # TODO, batch first\n if min_coords is None:\n min_coords, _ = coords.min(0, keepdim=True)\n elif min_coords.ndim == 1:\n min_coords = min_coords.unsqueeze(0)\n\n assert (min_coords % tensor_stride).sum() == 0, \\\n \"The minimum coordinates must be divisible by the tensor stride.\"\n\n if max_coords is not None:\n if max_coords.ndim == 1:\n max_coords = max_coords.unsqueeze(0)\n assert (max_coords % tensor_stride).sum() == 0, \\\n \"The maximum coordinates must be divisible by the tensor stride.\"\n\n coords -= min_coords\n\n if coords.ndim == 1:\n coords = coords.unsqueeze(1)\n if batch_indices.ndim == 1:\n batch_indices = batch_indices.unsqueeze(1)\n\n # return the contracted tensor\n if contract_coords:\n coords = coords // tensor_stride\n if max_coords is not None:\n max_coords = max_coords // tensor_stride\n min_coords = min_coords // tensor_stride\n\n new_coords = torch.cat((batch_indices, coords), dim=1).long()\n\n size = None\n if max_coords is not None:\n size = max_coords - min_coords + 1 # inclusive\n # Squeeze to make the size one-dimensional\n size = size.squeeze()\n\n max_batch = max(self.coords_man.get_batch_indices())\n size = torch.Size([max_batch + 1, *size, self.F.size(1)])\n\n sparse_tensor = torch_sparse_Tensor(new_coords.t().to(self.F.device),\n self.F, size)\n tensor_stride = torch.IntTensor(self.tensor_stride)\n return sparse_tensor, min_coords, tensor_stride\n\n def dense(self, min_coords=None, max_coords=None, contract_coords=True):\n r\"\"\"Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch dense\n tensor.\n\n Args:\n :attr:`min_coords` (torch.IntTensor, optional): The min\n coordinates of the output sparse tensor. Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`max_coords` (torch.IntTensor, optional): The max coordinates\n of the output sparse tensor (inclusive). 
Must be divisible by the\n current :attr:`tensor_stride`.\n\n :attr:`contract_coords` (bool, optional): Given True, the output\n coordinates will be divided by the tensor stride to make features\n contiguous.\n\n Returns:\n :attr:`spare_tensor` (torch.sparse.Tensor): the torch sparse tensor\n representation of the self in `[Batch Dim, Feature Dim, Spatial\n Dim..., Spatial Dim]`. The coordinate of each feature can be\n accessed via `min_coord + tensor_stride * [the coordinate of the\n dense tensor]`.\n\n :attr:`min_coords` (torch.IntTensor): the D-dimensional vector\n defining the minimum coordinate of the output sparse tensor. If\n :attr:`contract_coords` is True, the :attr:`min_coords` will also\n be contracted.\n\n :attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector\n defining the stride between tensor elements.\n\n \"\"\"\n if min_coords is not None:\n assert isinstance(min_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n if max_coords is not None:\n assert isinstance(max_coords, torch.IntTensor)\n assert min_coords.numel() == self.D\n\n # Use int tensor for all operations\n tensor_stride = torch.IntTensor(self.tensor_stride)\n\n # New coordinates\n coords = self.C\n coords, batch_indices = coords[:, 1:], coords[:, 0]\n\n # TODO, batch first\n if min_coords is None:\n min_coords, _ = coords.min(0, keepdim=True)\n elif min_coords.ndim == 1:\n min_coords = min_coords.unsqueeze(0)\n\n assert (min_coords % tensor_stride).sum() == 0, \\\n \"The minimum coordinates must be divisible by the tensor stride.\"\n\n if max_coords is not None:\n if max_coords.ndim == 1:\n max_coords = max_coords.unsqueeze(0)\n assert (max_coords % tensor_stride).sum() == 0, \\\n \"The maximum coordinates must be divisible by the tensor stride.\"\n\n coords -= min_coords\n\n if coords.ndim == 1:\n coords = coords.unsqueeze(1)\n\n # return the contracted tensor\n if contract_coords:\n coords = coords // tensor_stride\n if max_coords is not None:\n max_coords = max_coords // tensor_stride\n min_coords = min_coords // tensor_stride\n\n size = None\n nchannels = self.F.size(1)\n max_batch = max(self.coords_man.get_batch_indices())\n if max_coords is not None:\n size = max_coords - min_coords + 1 # inclusive\n # Squeeze to make the size one-dimensional\n size = size.squeeze()\n size = torch.Size([max_batch + 1, nchannels, *size])\n else:\n size = coords.max(0)[0] + 1\n size = torch.Size([max_batch + 1, nchannels, *size.numpy()])\n\n dense_F = torch.zeros(size, dtype=self.F.dtype, device=self.F.device)\n\n tcoords = coords.t().long()\n batch_indices = batch_indices.long()\n exec(\"dense_F[batch_indices, :, \" +\n \", \".join([f\"tcoords[{i}]\" for i in range(len(tcoords))]) +\n \"] = self.F\")\n\n tensor_stride = torch.IntTensor(self.tensor_stride)\n return dense_F, min_coords, tensor_stride\n\n def slice(self, X, slicing_mode=0):\n r\"\"\"\n\n Args:\n :attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor\n that discretized the original input.\n\n :attr:`slicing_mode`: For future updates.\n\n Returns:\n :attr:`sliced_feats` (:attr:`torch.Tensor`): the resulting feature\n matrix that slices features on the discretized coordinates to the\n original continuous coordinates that generated the input X.\n\n Example::\n\n >>> # coords, feats from a data loader\n >>> print(len(coords)) # 227742\n >>> sinput = ME.SparseTensor(coords=coords, feats=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)\n >>> print(len(sinput)) # 161890 quantization results in fewer voxels\n 
>>> soutput = network(sinput)\n >>> print(len(soutput)) # 161890 Output with the same resolution\n >>> outputs = soutput.slice(sinput)\n >>> assert(outputs, torch.Tensor) # regular differentiable pytorch tensor\n >>> len(outputs) == len(coords) # recovers the original ordering and length\n \"\"\"\n # Currently only supports unweighted slice.\n return self.feats[X.inverse_mapping]\n \n def clone(self):\n self._F = self._F.clone()\n return self\n\n def features_at_coords(self, query_coords: torch.Tensor):\n r\"\"\"Extract features at the specified coordinate matrix.\n\n Args:\n :attr:`query_coords` (:attr:`torch.IntTensor`): a coordinate matrix\n of size :math:`N \\times (D + 1)` where :math:`D` is the size of the\n spatial dimension.\n\n Returns:\n :attr:`query_feats` (:attr:`torch.Tensor`): a feature matrix of size\n :math:`N \\times D_F` where :math:`D_F` is the number of channels in\n the feature. Features for the coordinates that are not found, it will be zero.\n\n :attr:`valid_rows` (:attr:`list`): a list of row indices that\n contain valid values. The rest of the rows that are not found in the\n `query_feats` will be 0.\n\n \"\"\"\n cm = self.coords_man\n\n self_key = self.coords_key\n query_key = cm.create_coords_key(query_coords)\n\n self_indices, query_indices = cm.get_kernel_map(\n self_key, query_key, kernel_size=1)\n query_feats = torch.zeros((len(query_coords), self._F.size(1)),\n dtype=self.dtype,\n device=self.device)\n\n if len(self_indices[0]) > 0:\n query_feats[query_indices[0]] = self._F[self_indices[0]]\n return query_feats, query_indices[0]\n\n\ndef _get_coords_key(\n input: SparseTensor,\n coords: Union[torch.IntTensor, CoordsKey, SparseTensor] = None,\n tensor_stride: Union[Sequence, np.ndarray, torch.IntTensor] = 1):\n r\"\"\"Process coords according to its type.\n \"\"\"\n if coords is not None:\n assert isinstance(coords, (CoordsKey, torch.IntTensor, SparseTensor))\n if isinstance(coords, torch.IntTensor):\n coords_key = input.coords_man.create_coords_key(\n coords,\n tensor_stride=tensor_stride,\n force_creation=True,\n force_remap=True,\n allow_duplicate_coords=True)\n elif isinstance(coords, SparseTensor):\n coords_key = coords.coords_key\n else: # CoordsKey type due to the previous assertion\n coords_key = coords\n else:\n coords_key = CoordsKey(input.D)\n return coords_key\n"
] |
[
[
"torch.Size",
"torch.floor",
"torch.zeros",
"torch.cat",
"torch.sparse.DoubleTensor",
"torch.IntTensor",
"torch.sparse.FloatTensor"
]
] |
Windact/cloud_detection
|
[
"9fa24a957105efe87d32ad5b05c088fa6d743000"
] |
[
"cloudnet-package/trainer/utils.py"
] |
[
"from tensorflow.keras import backend as K\nfrom tensorflow import keras\n\nsmooth = 0.0000001\n\n\ndef jacc_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return 1 - ((intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth))\n\n\nclass ADAMLearningRateTracker(keras.callbacks.Callback):\n \"\"\"It prints out the last used learning rate after each epoch (useful for resuming a training)\n original code: https://github.com/keras-team/keras/issues/7874#issuecomment-329347949\n \"\"\"\n\n def __init__(self, end_lr):\n super(ADAMLearningRateTracker, self).__init__()\n self.end_lr = end_lr\n\n def on_epoch_end(self, epoch, logs={}): # works only when decay in optimizer is zero\n optimizer = self.model.optimizer\n # t = K.cast(optimizer.iterations, K.floatx()) + 1\n # lr_t = K.eval(optimizer.lr * (K.sqrt(1. - K.pow(optimizer.beta_2, t)) /\n # (1. - K.pow(optimizer.beta_1, t))))\n # print('\\n***The last Actual Learning rate in this epoch is:', lr_t,'***\\n')\n print('\\n***The last Basic Learning rate in this epoch is:', K.eval(optimizer.lr), '***\\n')\n # stops the training if the basic lr is less than or equal to end_learning_rate\n if K.eval(optimizer.lr) <= self.end_lr:\n print(\"training is finished\")\n self.model.stop_training = True\n"
] |
[
[
"tensorflow.keras.backend.eval",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.flatten"
]
] |
WarrenWeckesser/heatmapcluster
|
[
"534db493ec13b37e1c0c97d8d9f6a2f8670a89fc"
] |
[
"demo/heatmapcluster_example2.py"
] |
[
"\nimport numpy as np\nfrom scipy.cluster.hierarchy import linkage\nimport matplotlib.pyplot as plt\nfrom heatmapcluster import heatmapcluster\n\n\ndef make_data(size, seed=None):\n if seed is not None:\n np.random.seed(seed)\n\n s = np.random.gamma([7, 6, 5], [6, 8, 6], size=(size[1], 3)).T\n i = np.random.choice(range(len(s)), size=size[0])\n x = s[i]\n\n t = np.random.gamma([8, 5, 6], [3, 3, 2.1], size=(size[0], 3)).T\n j = np.random.choice(range(len(t)), size=size[1])\n\n x += 1.1*t[j].T\n\n x += 2*np.random.randn(*size)\n\n row_labels = [('R%02d' % k) for k in range(x.shape[0])]\n col_labels = [('C%02d' % k) for k in range(x.shape[1])]\n\n return x, row_labels, col_labels\n\n\nx, row_labels, col_labels = make_data(size=(64, 48), seed=123)\n\nh = heatmapcluster(x, row_labels, col_labels,\n num_row_clusters=3, num_col_clusters=0,\n label_fontsize=6,\n xlabel_rotation=-75,\n cmap=plt.cm.coolwarm,\n show_colorbar=True,\n top_dendrogram=True,\n row_linkage=lambda x: linkage(x, method='average',\n metric='correlation'),\n col_linkage=lambda x: linkage(x.T, method='average',\n metric='correlation'),\n histogram=True)\n\nplt.show()\n"
] |
[
[
"numpy.random.seed",
"numpy.random.randn",
"numpy.random.gamma",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.show"
]
] |
whigy/chair-gan
|
[
"8144b34919a7c61487edc559738801b341a70331"
] |
[
"tools/process.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport argparse\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport tfimage as im\nimport threading\nimport time\nimport cv2\nfrom skimage.morphology import thin\n\nedge_pool = None\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_dir\", required=True, help=\"path to folder containing images\")\nparser.add_argument(\"--output_dir\", required=True, help=\"output path\")\nparser.add_argument(\"--operation\", required=True, choices=[\"grayscale\", \"resize\", \"blank\", \"combine\", \"edges\", \"skeletonize\"])\nparser.add_argument(\"--workers\", type=int, default=1, help=\"number of workers\")\n# resize\nparser.add_argument(\"--pad\", action=\"store_true\", help=\"pad instead of crop for resize operation\")\nparser.add_argument(\"--size\", type=int, default=256, help=\"size to use for resize operation\")\n# combine\nparser.add_argument(\"--b_dir\", type=str, help=\"path to folder containing B images for combine operation\")\n# edges\nparser.add_argument(\"--crop\", action=\"store_true\", help=\"crop the image before edge detection. Only works when background is white.\")\nparser.add_argument(\"--crop_dir\", help=\"path for cropped original images\")\n\na = parser.parse_args()\n\ndef resize(src):\n height, width, _ = src.shape\n dst = src\n if height != width:\n if a.pad:\n size = max(height, width)\n # pad to correct ratio\n oh = (size - height) // 2\n ow = (size - width) // 2\n dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)\n else:\n # crop to correct ratio\n size = min(height, width)\n oh = (height - size) // 2\n ow = (width - size) // 2\n dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)\n\n assert(dst.shape[0] == dst.shape[1])\n\n size, _, _ = dst.shape\n if size > a.size:\n dst = im.downscale(images=dst, size=[a.size, a.size])\n elif size < a.size:\n dst = im.upscale(images=dst, size=[a.size, a.size])\n return dst\n\n\ndef blank(src):\n height, width, _ = src.shape\n if height != width:\n raise Exception(\"non-square image\")\n\n image_size = width\n size = int(image_size * 0.3)\n offset = int(image_size / 2 - size / 2)\n\n dst = src\n dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])\n return dst\n\n\ndef combine(src, src_path):\n if a.b_dir is None:\n raise Exception(\"missing b_dir\")\n\n # find corresponding file in b_dir, could have a different extension\n basename, _ = os.path.splitext(os.path.basename(src_path))\n for ext in [\".png\", \".jpg\"]:\n sibling_path = os.path.join(a.b_dir, basename + ext)\n if tf.io.gfile.exists(sibling_path):\n sibling = im.load(sibling_path)\n break\n else:\n raise Exception(\"could not find sibling image for \" + src_path)\n\n # make sure that dimensions are correct\n height, width, _ = src.shape\n if height != sibling.shape[0] or width != sibling.shape[1]:\n raise Exception(\"differing sizes\")\n \n # convert both images to RGB if necessary\n if src.shape[2] == 1:\n src = im.grayscale_to_rgb(images=src)\n\n if sibling.shape[2] == 1:\n sibling = im.grayscale_to_rgb(images=sibling)\n\n # remove alpha channel\n if src.shape[2] == 4:\n src = src[:,:,:3]\n \n if sibling.shape[2] == 4:\n sibling = sibling[:,:,:3]\n\n return np.concatenate([src, sibling], axis=1)\n\n\ndef grayscale(src):\n return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))\n\n\ndef 
crop_and_resize(src, return_gray = False):\n \"\"\"\n crop edge image to discard white pad, and resize to training size\n based on: https://stackoverflow.com/questions/48395434/how-to-crop-or-remove-white-background-from-an-image\n [OBS!] only works on image with white background\n \"\"\"\n height, width, _ = src.shape\n\n # (1) Convert to gray, and threshold\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)\n\n # (2) Morph-op to remove noise\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)\n\n # (3) Find the max-area contour\n cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n cnt = sorted(cnts, key=cv2.contourArea)[-1]\n\n # (4) Crop\n x, y, w, h = cv2.boundingRect(cnt)\n x_1 = max(x, x - 10)\n y_1 = max(y, y - 10)\n x_2 = min(x+w, width)\n y_2 = min(y+h, height)\n if return_gray:\n dst = gray[y_1:y_2, x_1:x_2]\n else:\n dst = src[y_1:y_2, x_1:x_2]\n # pad white to resize\n height = int(max(0, w - h) / 2.0)\n width = int(max(0, h - w) / 2.0)\n padded = cv2.copyMakeBorder(dst, height, height, width, width, cv2.BORDER_CONSTANT, value=[255, 255, 255])\n\n return cv2.resize(padded, (a.size, a.size), interpolation=cv2.INTER_NEAREST)\n\n\ndef edges(src):\n src = np.asarray(src * 255, np.uint8)\n if a.crop:\n src = crop_and_resize(src)\n # detect edges based on Canny Edge Dection\n edge = cv2.bitwise_not(cv2.Canny(src, 80, 130))\n dst = cv2.cvtColor(edge, cv2.COLOR_GRAY2RGB)\n if a.crop:\n return np.asarray(src/255., np.float32), dst\n else:\n return dst\n\n\ndef skeletonize_edge(src):\n # Process sketch to fit input. Only used for test input\n src = np.asarray(src * 255, np.uint8)\n # Crop the sketch and minimize white padding.\n cropped = crop_and_resize(src, return_gray=True)\n # Skeletonize the lines\n skeleton = thin(cv2.bitwise_not(cropped))\n final = np.asarray(1 - np.float32(skeleton))\n return cv2.cvtColor(final, cv2.COLOR_GRAY2BGR)\n\ndef process(src_path, dst_path):\n src = im.load(src_path)\n if a.operation == \"edges\":\n if a.crop:\n name = dst_path.split(\"/\")[-1]\n src, dst = edges(src)\n im.save(src, os.path.join(a.crop_dir, name))\n else:\n dst = edges(src)\n elif a.operation == \"grayscale\":\n dst = grayscale(src)\n elif a.operation == \"resize\":\n dst = resize(src)\n elif a.operation == \"blank\":\n dst = blank(src)\n elif a.operation == \"combine\":\n dst = combine(src, src_path)\n elif a.operation == \"skeletonize\":\n dst = skeletonize_edge(src)\n else:\n raise Exception(\"invalid operation\")\n\n im.save(dst, dst_path)\n\n\ncomplete_lock = threading.Lock()\nstart = None\nnum_complete = 0\ntotal = 0\n\ndef complete():\n global num_complete, rate, last_complete\n\n with complete_lock:\n num_complete += 1\n now = time.time()\n elapsed = now - start\n rate = num_complete / elapsed\n if rate > 0:\n remaining = (total - num_complete) / rate\n else:\n remaining = 0\n\n print(\"%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining\" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))\n\n last_complete = now\n\n\ndef main():\n if not tf.io.gfile.exists(a.output_dir):\n tf.io.gfile.makedirs(a.output_dir)\n if a.operation == \"edges\" and a.crop:\n try:\n if not tf.io.gfile.exists(a.crop_dir):\n tf.io.gfile.makedirs(a.crop_dir)\n except Exception as e:\n raise Exception(\"invalid crop_dir: {:s}\".format(e))\n\n src_paths = []\n dst_paths = []\n\n 
skipped = 0\n for src_path in im.find(a.input_dir):\n name, _ = os.path.splitext(os.path.basename(src_path))\n dst_path = os.path.join(a.output_dir, name + \".png\")\n if tf.io.gfile.exists(dst_path):\n skipped += 1\n else:\n src_paths.append(src_path)\n dst_paths.append(dst_path)\n \n print(\"skipping %d files that already exist\" % skipped)\n \n global total\n total = len(src_paths)\n \n print(\"processing %d files\" % total)\n\n global start\n start = time.time()\n\n\n\n if a.workers == 1:\n with tf.Session() as sess:\n for src_path, dst_path in zip(src_paths, dst_paths):\n process(src_path, dst_path)\n complete()\n else:\n queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)\n dequeue_op = queue.dequeue()\n\n def worker(coord):\n with sess.as_default():\n while not coord.should_stop():\n try:\n src_path, dst_path = sess.run(dequeue_op)\n except tf.errors.OutOfRangeError:\n coord.request_stop()\n break\n\n process(src_path, dst_path)\n complete()\n\n # init epoch counter for the queue\n local_init_op = tf.local_variables_initializer()\n with tf.Session() as sess:\n sess.run(local_init_op)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n for i in range(a.workers):\n t = threading.Thread(target=worker, args=(coord,))\n t.start()\n threads.append(t)\n \n try:\n coord.join(threads)\n except KeyboardInterrupt:\n coord.request_stop()\n coord.join(threads)\n\nmain()\n"
] |
[
[
"tensorflow.local_variables_initializer",
"tensorflow.io.gfile.exists",
"numpy.asarray",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.io.gfile.makedirs",
"numpy.ones",
"numpy.concatenate",
"numpy.float32",
"tensorflow.Session"
]
] |
xadrianzetx/msitrees
|
[
"8050ec247adb090ca6112fab05f23dcaba3bc23c"
] |
[
"msitrees/_node.py"
] |
[
"# MIT License\n\n# Copyright (c) 2020 xadrianzetx\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport uuid\nimport json\nimport numpy as np\nfrom typing import Optional\n\n\nclass MSINode:\n \"\"\"A fundamental building block of MSI tree.\n\n Parameters\n ----------\n left : MSINode, default=None\n Left branch of the node. Can be another\n decision node or leaf node. If either\n left or right branch is None, then node\n is treated as leaf node.\n\n right : MSINode, default=None\n Right branch of the node. Can be another\n decision node or leaf node. If either\n left or right branch is None, then node\n is treated as leaf node.\n\n indices : list, default=None\n List of indices indicating subset\n of data used to perform split whithin\n a node.\n\n feature : int, default=None\n Index of a feature on which optimal\n split was performed.\n\n split : float, default=None\n Feature value on which optimal\n split was performed.\n\n proba : float, default=None\n Probability of predicted class label.\n Calculated as ratio of majority class\n label to all labels present in a node.\n\n y : int, default=None\n Predicted class label. 
Calculated as\n majority label within a node.\n\n Attributes\n ----------\n id : str\n Unique node id.\n\n Notes\n -----\n For internal use only.\n \"\"\"\n\n def __init__(self, left: Optional['MSINode'] = None,\n right: Optional['MSINode'] = None,\n indices: Optional[list] = None,\n feature: Optional[int] = None,\n split: Optional[float] = None,\n proba: Optional[np.ndarray] = None,\n y: Optional[int] = None):\n self.id = uuid.uuid4().hex\n self.left = left\n self.right = right\n self.indices = indices\n self.feature = feature\n self.split = split\n self.proba = proba\n self.y = y\n\n def __repr__(self):\n num_nodes = self.count_tree_nodes(leaf_only=False)\n return 'Tree root/node with {} children'.format(num_nodes)\n\n def __str__(self):\n r = self._get_tree_structure()\n return json.dumps(r)\n\n def _get_tree_structure(self) -> dict:\n \"\"\"Recursively builds dict tree representation\"\"\"\n if self.y is not None:\n return {'leaf': self.y}\n\n else:\n node_left = self.left._get_tree_structure()\n node_right = self.right._get_tree_structure()\n return {\n 'feature': self.feature,\n 'split': self.split,\n 'left': node_left,\n 'right': node_right\n }\n\n def reset(self) -> None:\n \"\"\"Resets all node attributes to None, except id\"\"\"\n attrs = [k for k in self.__dict__.keys() if k != 'id']\n for attr in attrs:\n setattr(self, attr, None)\n\n def set_split_criteria(self, feature: int, split: float) -> None:\n \"\"\"Sets feature index and split point.\n\n Parameters\n ----------\n feature : int\n Index of a feature on which optimal\n split was performed.\n\n split : float\n Feature value on which optimal\n split was performed\n \"\"\"\n\n self.feature = feature\n self.split = split\n\n def count_tree_nodes(self, leaf_only: bool) -> int:\n \"\"\"Counts number of leaf nodes or total number\n of child nodes for current node.\n\n Parameters\n ----------\n leaf_only : bool\n When true only leaf nodes are counted,\n otherwise both decision nodes and leaf\n nodes are.\n\n Returns\n -------\n total : int\n Number of nodes\n \"\"\"\n\n if self.y is not None:\n return 1\n\n lcount = self.left.count_tree_nodes(leaf_only) if self.left else 0\n rcount = self.right.count_tree_nodes(leaf_only) if self.right else 0\n total = lcount + rcount\n\n return total if leaf_only else total + 1\n\n def count_nodes_to_bottom(self) -> int:\n \"\"\"Return total depth of a tree including\n current node.\n\n Returns\n -------\n count : int\n Maximum depth of tree\n \"\"\"\n\n if self.y is not None:\n return 1\n\n lcount = self.left.count_nodes_to_bottom() if self.left else 0\n rcount = self.right.count_nodes_to_bottom() if self.right else 0\n\n return max([lcount, rcount]) + 1\n\n def get_node_by_id(self, id: str) -> 'MSINode':\n \"\"\"Retrieves node with specified id.\n\n Parameters\n ----------\n id : str\n Node id.\n\n Returns\n -------\n node : MSINode\n Node with specified id. 
If such node\n does not exist, then returns None\n \"\"\"\n\n if self.id == id:\n return self\n\n ncl = self.left.get_node_by_id(id) if self.left else None\n ncr = self.right.get_node_by_id(id) if self.right else None\n\n return ncl or ncr\n\n def predict(self, x: np.ndarray) -> tuple:\n \"\"\"Predicts class label and probability\n for input x.\n\n Returns\n -------\n pred : tuple\n Tuple with predicted values for input.\n Position 0 is class label, position 1\n is class probability.\n \"\"\"\n\n if self.y is not None:\n return (self.y, self.proba)\n\n testpt = x if np.isscalar(x) else x[self.feature]\n\n if testpt < self.split:\n pred = self.left.predict(x)\n\n else:\n pred = self.right.predict(x)\n\n return pred\n"
] |
[
[
"numpy.isscalar"
]
] |
sheikheddy/aus-files
|
[
"0c38d15d560ccbb8231c8ef210916ea94a0f004b"
] |
[
"Data/Lab7/test.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef graph_results(file_name):\n data = np.genfromtxt(file_name, delimiter=',', names=['size','time'])\n plt.plot(data['size'], data['time'])\n plt.xlabel('Size of input N')\n plt.ylabel('Time taken t')\n return "
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"numpy.genfromtxt"
]
] |
jorisvandenbossche/kartothek
|
[
"18b11e7b060bb778668ffc4e2f468910120e6385"
] |
[
"kartothek/io/dask/_shuffle.py"
] |
[
"from functools import partial\nfrom typing import List, Optional, Sequence, Union\n\nimport dask.array as da\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n\nfrom kartothek.core.typing import StoreFactory\nfrom kartothek.io.dask.compression import pack_payload, unpack_payload_pandas\nfrom kartothek.io_components.metapartition import MetaPartition\nfrom kartothek.io_components.write import write_partition\nfrom kartothek.serialization import DataFrameSerializer\n\n_KTK_HASH_BUCKET = \"__KTK_HASH_BUCKET\"\n\n\ndef _hash_bucket(df: pd.DataFrame, subset: Optional[Sequence[str]], num_buckets: int):\n \"\"\"\n Categorize each row of `df` based on the data in the columns `subset`\n into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`\n \"\"\"\n\n if not subset:\n subset = df.columns\n hash_arr = pd.util.hash_pandas_object(df[subset], index=False)\n buckets = hash_arr % num_buckets\n\n available_bit_widths = np.array([8, 16, 32, 64])\n mask = available_bit_widths > np.log2(num_buckets)\n bit_width = min(available_bit_widths[mask])\n return df.assign(**{_KTK_HASH_BUCKET: buckets.astype(f\"uint{bit_width}\")})\n\n\ndef shuffle_store_dask_partitions(\n ddf: dd.DataFrame,\n table: str,\n secondary_indices: Optional[Union[str, Sequence[str]]],\n metadata_version: int,\n partition_on: List[str],\n store_factory: StoreFactory,\n df_serializer: Optional[DataFrameSerializer],\n dataset_uuid: str,\n num_buckets: int,\n sort_partitions_by: List[str],\n bucket_by: Sequence[str],\n) -> da.Array:\n \"\"\"\n Perform a dataset update with dask reshuffling to control partitioning.\n\n The shuffle operation will perform the following steps\n\n 1. Pack payload data\n\n Payload data is serialized and compressed into a single byte value using\n ``distributed.protocol.serialize_bytes``, see also ``pack_payload``.\n\n 2. Apply bucketing\n\n Hash the column subset ``bucket_by`` and distribute the hashes in\n ``num_buckets`` bins/buckets. Internally every bucket is identified by an\n integer and we will create one physical file for every bucket ID. The\n bucket ID is not exposed to the user and is dropped after the shuffle,\n before the store. This is done since we do not want to guarantee at the\n moment, that the hash function remains stable.\n\n 3. Perform shuffle (dask.DataFrame.groupby.apply)\n\n The groupby key will be the combination of ``partition_on`` fields and the\n hash bucket ID. This will create a physical file for every unique tuple\n in ``partition_on + bucket_ID``. The function which is applied to the\n dataframe will perform all necessary subtask for storage of the dataset\n (partition_on, index calc, etc.).\n\n 4. Unpack data (within the apply-function)\n\n After the shuffle, the first step is to unpack the payload data since\n the follow up tasks will require the full dataframe.\n\n 5. Pre storage processing and parquet serialization\n\n We apply important pre storage processing like sorting data, applying\n final partitioning (at this time there should be only one group in the\n payload data but using the ``MetaPartition.partition_on`` guarantees the\n appropriate data structures kartothek expects are created.).\n After the preprocessing is done, the data is serialized and stored as\n parquet. 
The applied function will return an (empty) MetaPartition with\n indices and metadata which will then be used to commit the dataset.\n\n Returns\n -------\n\n A dask.Array holding relevant MetaPartition objects as values\n\n \"\"\"\n if ddf.npartitions == 0:\n return ddf\n\n group_cols = partition_on.copy()\n\n if num_buckets is None:\n raise ValueError(\"``num_buckets`` must not be None when shuffling data.\")\n\n meta = ddf._meta\n meta[_KTK_HASH_BUCKET] = np.uint64(0)\n ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)\n group_cols.append(_KTK_HASH_BUCKET)\n\n unpacked_meta = ddf._meta\n\n ddf = pack_payload(ddf, group_key=group_cols)\n ddf = ddf.groupby(by=group_cols)\n ddf = ddf.apply(\n partial(\n _unpack_store_partition,\n secondary_indices=secondary_indices,\n sort_partitions_by=sort_partitions_by,\n table=table,\n dataset_uuid=dataset_uuid,\n partition_on=partition_on,\n store_factory=store_factory,\n df_serializer=df_serializer,\n metadata_version=metadata_version,\n unpacked_meta=unpacked_meta,\n ),\n meta=(\"MetaPartition\", \"object\"),\n )\n return ddf\n\n\ndef _unpack_store_partition(\n df: pd.DataFrame,\n secondary_indices: List[str],\n sort_partitions_by: List[str],\n table: str,\n dataset_uuid: str,\n partition_on: Optional[List[str]],\n store_factory: StoreFactory,\n df_serializer: DataFrameSerializer,\n metadata_version: int,\n unpacked_meta: pd.DataFrame,\n) -> MetaPartition:\n \"\"\"Unpack payload data and store partition\"\"\"\n df = unpack_payload_pandas(df, unpacked_meta)\n if _KTK_HASH_BUCKET in df:\n df = df.drop(_KTK_HASH_BUCKET, axis=1)\n return write_partition(\n partition_df=df,\n secondary_indices=secondary_indices,\n sort_partitions_by=sort_partitions_by,\n dataset_table_name=table,\n dataset_uuid=dataset_uuid,\n partition_on=partition_on,\n store_factory=store_factory,\n df_serializer=df_serializer,\n metadata_version=metadata_version,\n )\n"
] |
[
[
"numpy.log2",
"numpy.array",
"pandas.util.hash_pandas_object",
"numpy.uint64"
]
] |
codediaz/Opencv-image-filters
|
[
"98afae9fe50da2212cccb7bc9d48db7c41d8df05"
] |
[
"Filters/init.py"
] |
[
"import numpy as np\nimport cv2\nimport random\nfrom utils import CFEVideoConf, image_resize\nimport glob\nimport math\n\ncap = cv2.VideoCapture(0)\n\nframes_per_seconds = 20\nsave_path='saved-media/filter.mp4'\nconfig = CFEVideoConf(cap, filepath=save_path, res='480p')\n#out = cv2.VideoWriter(save_path, config.video_type, frames_per_seconds, config.dims)\n\ndef apply_invert(frame):\n return cv2.bitwise_not(frame)\n\ndef verify_alpha_channel(frame):\n try:\n frame.shape[3] # 4th position\n except IndexError:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)\n return frame\n\n\ndef apply_color_overlay(frame, \n intensity=0.2, \n blue = 0,\n green = 0,\n red = 0):\n frame = verify_alpha_channel(frame)\n frame_h, frame_w, frame_c = frame.shape\n color_bgra = (blue, green, red, 1)\n overlay = np.full((frame_h, frame_w, 4), color_bgra, dtype='uint8')\n cv2.addWeighted(overlay, intensity, frame, 1.0, 0, frame)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)\n return frame\n\ndef apply_sepia(frame, intensity=0.5):\n blue = 20\n green = 66 \n red = 112\n frame = apply_color_overlay(frame, \n intensity=intensity, \n blue=blue, green=green, red=red)\n return frame\n\n\ndef alpha_blend(frame_1, frame_2, mask):\n alpha = mask/255.0 \n blended = cv2.convertScaleAbs(frame_1*(1-alpha) + frame_2*alpha)\n return blended\n\n\ndef apply_circle_focus_blur(frame, intensity=0.2):\n frame = verify_alpha_channel(frame)\n frame_h, frame_w, frame_c = frame.shape\n y = int(frame_h/2)\n x = int(frame_w/2)\n radius = int(x/2) # int(x/2)\n center = (x,y)\n mask = np.zeros((frame_h, frame_w, 4), dtype='uint8')\n cv2.circle(mask, center, radius, (255,255,255), -1, cv2.LINE_AA)\n mask = cv2.GaussianBlur(mask, (21,21),11 )\n blured = cv2.GaussianBlur(frame, (21,21), 11)\n blended = alpha_blend(frame, blured, 255-mask)\n frame = cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)\n return frame\n\ndef apply_portrait_mode(frame):\n frame = verify_alpha_channel(frame)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n _, mask = cv2.threshold(gray, 120,255,cv2.THRESH_BINARY)\n mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGRA)\n blured = cv2.GaussianBlur(frame, (21,21), 11)\n blended = alpha_blend(frame, blured, mask)\n frame = cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)\n return frame\n\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n portrait_mode = apply_portrait_mode(frame)\n cv2.imshow('portrait_modeS', portrait_mode)\n\n circle_blur = apply_circle_focus_blur(frame)\n cv2.imshow('circle_blur', circle_blur)\n\n sepia = apply_sepia(frame.copy())\n cv2.imshow('sepia', sepia)\n\n redish_color = apply_color_overlay(frame.copy(), intensity=.5, red=230, blue=10)\n cv2.imshow('redish_color', redish_color)\n\n\n invert = apply_invert(frame)\n cv2.imshow('invert', invert)\n #cv2.imshow('frame', frame)\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()"
] |
[
[
"numpy.zeros",
"numpy.full"
]
] |
nikhiljain-413/Hacktoberfest2021_beginner
|
[
"56b008c9ed294c3fc23b44fa13faced99948236d"
] |
[
"Python3-Learn/TIC_TAC_TOE game.py"
] |
[
"\n# AUTHOR: Hitesh Vishnoi\n# Python3 Concept: Tic Tac Toe Game !!\n# GITHUB: https://github.com/hiteshv01\n\n\nimport numpy as np \nimport random \nfrom time import sleep \n\n# Creates an empty board \ndef create_board(): \n return(np.array([[0, 0, 0], \n [0, 0, 0], \n [0, 0, 0]])) \n\n# Check for empty places on board \ndef possibilities(board): \n l = [] \n\n for i in range(len(board)): \n for j in range(len(board)): \n\n if board[i][j] == 0: \n l.append((i, j)) \n return(l) \n\n# Select a random place for the player \ndef random_place(board, player): \n selection = possibilities(board) \n current_loc = random.choice(selection) \n board[current_loc] = player \n return(board) \n\n# Checks whether the player has three \n# of their marks in a horizontal row \ndef row_win(board, player): \n for x in range(len(board)): \n win = True\n\n for y in range(len(board)): \n if board[x, y] != player: \n win = False\n continue\n\n if win == True: \n return(win) \n return(win) \n\n# Checks whether the player has three \n# of their marks in a vertical row \ndef col_win(board, player): \n for x in range(len(board)): \n win = True\n\n for y in range(len(board)): \n if board[y][x] != player: \n win = False\n continue\n\n if win == True: \n return(win) \n return(win) \n\n# Checks whether the player has three \n# of their marks in a diagonal row \ndef diag_win(board, player): \n win = True\n y = 0\n for x in range(len(board)): \n if board[x, x] != player: \n win = False\n if win: \n return win \n win = True\n if win: \n for x in range(len(board)): \n y = len(board) - 1 - x \n if board[x, y] != player: \n win = False\n return win \n\n# Evaluates whether there is \n# a winner or a tie \ndef evaluate(board): \n winner = 0\n\n for player in [1, 2]: \n if (row_win(board, player) or\n col_win(board,player) or \n diag_win(board,player)): \n\n winner = player \n\n if np.all(board != 0) and winner == 0: \n winner = -1\n return winner \n\n# Main function to start the game \ndef play_game(): \n board, winner, counter = create_board(), 0, 1\n print(board) \n sleep(2) \n\n while winner == 0: \n for player in [1, 2]: \n board = random_place(board, player) \n print(\"Board after \" + str(counter) + \" move\") \n print(board) \n sleep(2) \n counter += 1\n winner = evaluate(board) \n if winner != 0: \n break\n return(winner) \n\n# Driver Code \nprint(\"Winner is: \" + str(play_game())) \n"
] |
[
[
"numpy.all",
"numpy.array"
]
] |
johnnygreco/pymfit
|
[
"4af480771c8b9a463a3c1cf0abde1ac416649bbe"
] |
[
"pymfit/erwin_utils/imfit.py"
] |
[
"\"\"\"\nPython helper functions for imfit written by Peter Erwin.\nModified by Johnny Greco to be compatible with python 3.\n\"\"\"\nfrom __future__ import division, print_function\n\n# Code for reading in and analyzing output of imfit\n\nimport glob\nimport numpy as np\n\nfrom . import imfit_funcs as imfuncs\n\n\n# dictionary mapping imfit function short names (as found in the config/parameter file) to\n# corresponding 1-D Python functions in imfit_funcs.py, along with some useful information:\n# \"function\" = corresponding imfit_funcs.py function, if one exists\n# \"nSkip\" = the number of 2D-related parameters to skip (e.g., PA, ellipticity),\n# \"ell\" = index for ellipticity parameter, if it exists,\n# \"a\" = index or indices for semi-major-axis parameters (r_e, h, sigma, etc.)\nimfitFunctionMap = {\"Exponential\": {\"function\": imfuncs.Exponential, \"nSkip\": 2, \"ell\": 1, \"a\": [3]},\n\t\t\t\t\"Exponential_GenEllipse\": {\"function\": imfuncs.Exponential, \"nSkip\": 3, \"ell\": 1, \"a\": [4]},\n\t\t\t\t\"Sersic\": {\"function\": imfuncs.Sersic, \"nSkip\": 2, \"ell\": 1, \"a\": [4]},\n\t\t\t\t\"Sersic_GenEllipse\": {\"function\": imfuncs.Sersic, \"nSkip\": 3, \"ell\": 1, \"a\": [5]},\n\t\t\t\t\"Gaussian\": {\"function\": imfuncs.Gauss, \"nSkip\": 2, \"ell\": 1, \"a\": [3]},\n\t\t\t\t\"GaussianRing\": {\"function\": imfuncs.GaussRing, \"nSkip\": 2, \"ell\": 1, \"a\": [3,4]},\n\t\t\t\t\"GaussianRing2Side\": {\"function\": imfuncs.GaussRing2Side, \"nSkip\": 2, \"ell\": 1, \"a\": [3,4,5]},\n\t\t\t\t\"Moffat\": {\"function\": imfuncs.Moffat, \"nSkip\": 2, \"ell\": 1, \"a\": [3]},\n\t\t\t\t\"BrokenExponential\": {\"function\": imfuncs.BrokenExp, \"nSkip\": 2, \"ell\": 1, \"a\": [3,4,5]}}\n\n\n\ndef ChopComments( theLine ):\n\treturn theLine.split(\"#\")[0]\n\n\ndef GetFunctionImageNames( baseName, funcNameList ):\n\t\"\"\"Generate a list of FITS filenames as would be created by makeimage in \"--output-functions\"\n\tmode.\n\t\"\"\"\n\n\tnImages = len(funcNameList)\n\timageNameList = [ \"%s%d_%s.fits\" % (baseName, i + 1, funcNameList[i]) for i in range(nImages) ]\n\treturn imageNameList\n\n\ndef ReadImfitConfigFile( fileName, minorAxis=False, pix=0.168, getNames=False, X0=0.0 ):\n\t\"\"\"Function to read and parse an imfit-generated parameter file\n\t(or input config file) and return a tuple consisting of:\n\t(list of 1-D imfit_funcs functions, list of lists of parameters).\n\n\tpix = scale in arcsec/pixel, if desired for plotting vs radii in arcsec.\n\n\tWe assume that all functions have a center at x = 0; this can be changed by setting\n\tX0.\n\n\tReturns tuple of (functionList, trimmedParameterList)\n\tIf getNames == True:\n\t\tReturns tuple of (functionNameList, functionList, trimmedParameterList)\n\t\"\"\"\n\n\tdlines = [ line for line in open(fileName) if len(line.strip()) > 0 and line[0] != \"#\" ]\n\n\tfuncNameList = []\n\tparamMetaList = []\n\tcurrentParamList = []\n\tnLines = len(dlines)\n\tfor line in dlines:\n\t\ttrimmedLine = ChopComments(line)\n\t\t#print(trimmedLine)\n\t\tif trimmedLine.find(\"X0\") == 0:\n\t\t\tcontinue\n\t\tif trimmedLine.find(\"Y0\") == 0:\n\t\t\tcontinue\n\t\tif trimmedLine.find(\"FUNCTION\") == 0:\n\t\t\t# if this isn't the first function, store the previous set of parameters\n\t\t\tif len(currentParamList) > 0:\n\t\t\t\tparamMetaList.append(currentParamList)\n\t\t\t# make a new parameter list for the new function\n\t\t\tcurrentParamList = [X0]\n\t\t\tpp = trimmedLine.split()\n\t\t\tfname = 
pp[1].strip()\n\t\t\tfuncNameList.append(fname)\n\t\t\tcontinue\n\t\telse:\n\t\t\tpp = trimmedLine.split()\n\t\t\tnewValue = float(pp[1])\n\t\t\tcurrentParamList.append(newValue)\n\n\t# ensure that final set of parameters get stored:\n\tparamMetaList.append(currentParamList)\n\n\t# process function list to remove unneeded parameters (and convert size measures\n\t# from major-axis to minor-axis, if requested)\n\tfuncList = [ imfitFunctionMap[fname][\"function\"] for fname in funcNameList ]\n\ttrimmedParamList = []\n\tnFuncs = len(funcList)\n\tfor i in range(nFuncs):\n\t\tfname = funcNameList[i]\n\t\tnSkipParams = imfitFunctionMap[fname][\"nSkip\"]\n\t\tfullParams = paramMetaList[i]\n\t\t# calculate scaling factor for minor-axis values, if needed\n\t\tif minorAxis is True:\n\t\t\tprint(fname)\n\t\t\tellIndex = imfitFunctionMap[fname][\"ell\"]\n\t\t\tprint(ellIndex)\n\t\t\tell = fullParams[ellIndex+1]\n\t\t\tq = 1.0 - ell\n\t\telse:\n\t\t\tq = 1.0\n\t\tprint(i, fname)\n\t\tsmaIndices = imfitFunctionMap[fname][\"a\"]\n\t\t# convert length values to arcsec and/or minor-axis, if needed,\n\t\tfor smaIndex in smaIndices:\n\t\t\t# +1 to account for X0 value at beginning of parameter list\n\t\t\tfullParams[smaIndex+1] = pix*q*fullParams[smaIndex+1]\n\t\t# construct the final 1-D parameter set for this function: X0 value, followed\n\t\t# by post-2D-shape parameters\n\t\ttrimmedParams = [fullParams[0]]\n\t\ttrimmedParams.extend(fullParams[nSkipParams+1:])\n\t\ttrimmedParamList.append(trimmedParams)\n\n\n\tif getNames is True:\n\t\treturn (funcNameList, funcList, trimmedParamList)\n\telse:\n\t\treturn (funcList, trimmedParamList)\n\n\n\n\n# Code for reading output of bootstrap resampling and MCMC chains\n\ndef GetBootstrapOutput( filename ):\n\t\"\"\"Reads imfit's bootstrap-resampling output when saved using the\n\t--save-bootstrap command-line option.\n\n\tParameters\n\t----------\n\tfilename : str\n\t\tname of file with bootstrap-resampling output\n\n\tReturns\n\t-------\n\t(column_names, data_array) : tuple of (list, np.ndarray)\n\t\tcolumn_names = list of column names (strings)\n\t\tdata_array = numpy array of parameter values\n\t\t\twith shape = (n_iterations, n_parameters)\n\t\"\"\"\n\n\t# get first 100 lines\n\t# FIXME: file *could* be shorter than 100 lines; really complicated\n\t# model could have > 100 lines of header...\n\twith open(filename) as theFile:\n\t\tfirstLines = [next(theFile) for x in range(100)]\n\n\t# find header line with column names and extract column names\n\tfor i in range(len(firstLines)):\n\t\tif firstLines[i].find(\"# Bootstrap resampling output\") >= 0:\n\t\t\tcolumnNamesIndex = i + 1\n\t\t\tbreak\n\tcolumnNames = firstLines[columnNamesIndex][1:].split()\n\tfor i in range(len(columnNames)):\n\t\tif columnNames[i] == \"likelihood\":\n\t\t\tnParamColumns = i\n\t\t\tbreak\n\n\t# get the data\n\td = np.loadtxt(filename)\n\n\treturn (columnNames, d)\n\n\ndef GetSingleChain( filename, getAllColumns=False ):\n\t\"\"\"Reads a single MCMC chain output file and returns a tuple of column names\n\tand a numpy array with the data.\n\n\tParameters\n\t----------\n\tfilename : str\n\t\tname of file with MCMC output chain\n\n\tgetAllColumns: bool, optional\n\t\tif False [default], only model parameter-value columns are retrieved;\n\t\tif True, all output columns (including MCMC diagnostics) are retrieved\n\n\tReturns\n\t-------\n\t(column_names, data_array) : tuple of (list, np.ndarray)\n\t\tcolumn_names = list of column names (strings)\n\t\tdata_array = numpy array of parameter 
values\n\t\t\twith shape = (n_iterations, n_parameters)\n\t\"\"\"\n\n\t# get first 100 lines\n\t# FIXME: file *could* be shorter than 100 lines; really complicated\n\t# model could have > 100 lines of header...\n\twith open(filename) as theFile:\n\t\tfirstLines = [next(theFile) for x in range(100)]\n\n\t# find header line with column names and extract column names\n\tfor i in range(len(firstLines)):\n\t\tif firstLines[i].find(\"# Column Headers\") >= 0:\n\t\t\tcolumnNamesIndex = i + 1\n\t\t\tbreak\n\tcolumnNames = firstLines[columnNamesIndex][1:].split()\n\tfor i in range(len(columnNames)):\n\t\tif columnNames[i] == \"likelihood\":\n\t\t\tnParamColumns = i\n\t\t\tbreak\n\n\t# get data for all columns, or just the model parameters?\n\twhichCols = None\n\tif not getAllColumns:\n\t\twhichCols = list(range(nParamColumns))\n\t\toutputColumnNames = columnNames[:nParamColumns]\n\telse:\n\t\twhichCols = None\n\t\toutputColumnNames = columnNames\n\n\t# get the data\n\td = np.loadtxt(filename, usecols=whichCols)\n\n\treturn (outputColumnNames, d)\n\n\ndef MergeChains( fname_root, maxChains=None, getAllColumns=False, start=10000, last=None,\n\t\t\t\tsecondHalf=False ):\n\t\"\"\"\n\tReads and concatenates all MCMC output chains with filenames = fname_root.*.txt,\n\tusing data from t=start onwards. By default, all generations from each chain\n\tare extracted; this can be modified with the start, last, or secondHalf keywords.\n\n\n\tParameters\n\t----------\n\tfname_root : str\n\t\troot name of output chain files (e.g., \"mcmc_out\")\n\n\tmaxChains : int or None, optional\n\t\tmaximum number of chain files to read [default = None = read all files]\n\n\tgetAllColumns : bool, optional\n\t\tif False [default], only model parameter-value columns are retrieved;\n\t\tif True, all output columns (including MCMC diagnostics) are retrieved\n\n\tstart : int, optional\n\t\textract samples from each chain beginning with time = start\n\t\tignored if \"secondHalf\" is True or if \"last\" is not None\n\n\tlast : int or None, optional\n\t\textract last N samples from each chain\n\t\tignored if \"secondHalf\" is True\n\n\tsecondHalf : bool, optional\n\t\tif True, only the second half of each chain is extracted\n\t\tif False [default],\n\n\tReturns\n\t-------\n\t(column_names, data_array) : tuple of (list, np.ndarray)\n\t\tcolumn_names = list of column names (strings)\n\t\tdata_array = numpy array of parameter values\n\t\t\twith shape = (n_samples, n_parameters)\n\t\"\"\"\n\n\t# construct list of filenames\n\tif maxChains is None:\n\t\tglobPattern = \"{0}.*.txt\".format(fname_root)\n\t\tfilenames = glob.glob(globPattern)\n\telse:\n\t\tfilenames = [\"{0}.{1}.txt\".format(fname_root, n) for n in range(maxChains)]\n\tnFiles = len(filenames)\n\n\t# get the first chain so we can tell how long the chains are\n\t(colNames, dd) = GetSingleChain(filenames[0], getAllColumns=getAllColumns)\n\tnGenerations = dd.shape[0]\n\n\t# figure out what part of full chain to extract\n\tif secondHalf is True:\n\t\tstartTime = int(np.floor(nGenerations / 2))\n\telif last is not None:\n\t\tstartTime = -last\n\telse:\n\t\tstartTime = start\n\n\t# get first chain and column names; figure out if we get all columns or just\n\t# model parameters\n\tif (startTime >= nGenerations):\n\t\ttxt = \"WARNING: # generations in MCMC chain file {0} ({1:d}) is <= \".format(filenames[0],\n\t\t\t\tnGenerations)\n\t\ttxt += \"requested start time ({0:d})!\\n\".format(startTime)\n\t\tprint(txt)\n\t\treturn None\n\tdd_final = dd[startTime:,:]\n\tif getAllColumns is 
False:\n\t\tnParamColumns = len(colNames)\n\t\twhichCols = list(range(nParamColumns))\n\telse:\n\t\twhichCols = None\n\n\t# get and append rest of chains if more than 1 chain-file was requested\n\tif nFiles > 1:\n\t\tfor i in range(1, nFiles):\n\t\t\tdd_next = np.loadtxt(filenames[i], usecols=whichCols)\n\t\t\tdd_final = np.concatenate((dd_final, dd_next[startTime:,:]))\n\n\treturn (colNames, dd_final)\n\n"
] |
[
[
"numpy.concatenate",
"numpy.floor",
"numpy.loadtxt"
]
] |
faresbs/sign-language-tutor
|
[
"d4091d6cd582a80a5ad38759e973357b02731fc8"
] |
[
"cnn_architectures.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision import models\n\nimport torch.utils.model_zoo as model_zoo\n\n\nmodel_urls = {\n 'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',\n}\n\n\nclass vgg16(nn.Module):\n\n\tdef __init__(self, num_classes):\n\t\tsuper(vgg16, self).__init__()\n\t\tfeatures = list(models.vgg16().features)\n\t\tself.features = nn.ModuleList(features)\n\t\tself.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes))\n\n\n\tdef forward(self, x):\n\t\tfor feature in self.features:\n\t\t\tx = feature(x)\n\t\tx = x.view(x.size(0), -1)\n\t\tx = self.classifier(x)\n\n\t\treturn x\n\n\nclass Inception3(nn.Module):\n\n def __init__(self, num_classes=1000, channels=3, aux_logits=True, transform_input=False):\n super(Inception3, self).__init__()\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n self.Conv2d_1a_3x3 = BasicConv2d(channels, 32, kernel_size=3, stride=2)\n self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)\n self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)\n self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)\n self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)\n self.Mixed_5b = InceptionA(192, pool_features=32)\n self.Mixed_5c = InceptionA(256, pool_features=64)\n self.Mixed_5d = InceptionA(288, pool_features=64)\n self.Mixed_6a = InceptionB(288)\n self.Mixed_6b = InceptionC(768, channels_7x7=128)\n self.Mixed_6c = InceptionC(768, channels_7x7=160)\n self.Mixed_6d = InceptionC(768, channels_7x7=160)\n self.Mixed_6e = InceptionC(768, channels_7x7=192)\n if aux_logits:\n self.AuxLogits = InceptionAux(768, num_classes)\n self.Mixed_7a = InceptionD(768)\n self.Mixed_7b = InceptionE(1280)\n self.Mixed_7c = InceptionE(2048)\n self.fc = nn.Linear(2048, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m, 'stddev') else 0.1\n X = stats.truncnorm(-2, 2, scale=stddev)\n values = torch.Tensor(X.rvs(m.weight.numel()))\n values = values.view(m.weight.size())\n m.weight.data.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.transform_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(x)\n # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x)\n # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x)\n # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x)\n # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x)\n # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 35 x 35 x 192\n x = self.Mixed_5b(x)\n # 35 x 35 x 256\n x = self.Mixed_5c(x)\n # 35 x 35 x 288\n x = self.Mixed_5d(x)\n # 35 x 35 x 288\n x = self.Mixed_6a(x)\n # 17 x 17 x 768\n x = self.Mixed_6b(x)\n # 17 x 17 x 768\n x = self.Mixed_6c(x)\n # 17 x 17 x 768\n x = self.Mixed_6d(x)\n # 17 x 17 x 768\n x = self.Mixed_6e(x)\n # 17 x 17 x 768\n if self.training and self.aux_logits:\n aux = self.AuxLogits(x)\n # 17 x 17 x 768\n x = self.Mixed_7a(x)\n # 8 x 8 x 1280\n x = 
self.Mixed_7b(x)\n # 8 x 8 x 2048\n x = self.Mixed_7c(x)\n # 8 x 8 x 2048\n x = F.avg_pool2d(x, kernel_size=8)\n # 1 x 1 x 2048\n x = F.dropout(x, training=self.training)\n # 1 x 1 x 2048\n x = x.view(x.size(0), -1)\n # 2048\n x = self.fc(x)\n # 1000 (num_classes)\n if self.training and self.aux_logits:\n return x, aux\n return x\n\n\nclass InceptionA(nn.Module):\n\n def __init__(self, in_channels, pool_features):\n super(InceptionA, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)\n\n self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)\n self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)\n\n self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch5x5 = self.branch5x5_1(x)\n branch5x5 = self.branch5x5_2(branch5x5)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionB(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionB, self).__init__()\n self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)\n self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3(x)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n\n outputs = [branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionC(nn.Module):\n\n def __init__(self, in_channels, channels_7x7):\n super(InceptionC, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)\n\n c7 = channels_7x7\n self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))\n\n self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)\n self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))\n\n self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch7x7 = self.branch7x7_1(x)\n branch7x7 = self.branch7x7_2(branch7x7)\n branch7x7 = self.branch7x7_3(branch7x7)\n\n branch7x7dbl = self.branch7x7dbl_1(x)\n branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)\n branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n 
branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionD(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionD, self).__init__()\n self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)\n\n self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)\n self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))\n self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))\n self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)\n\n def forward(self, x):\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = self.branch3x3_2(branch3x3)\n\n branch7x7x3 = self.branch7x7x3_1(x)\n branch7x7x3 = self.branch7x7x3_2(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_3(branch7x7x3)\n branch7x7x3 = self.branch7x7x3_4(branch7x7x3)\n\n branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)\n outputs = [branch3x3, branch7x7x3, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionE(nn.Module):\n\n def __init__(self, in_channels):\n super(InceptionE, self).__init__()\n self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)\n\n self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)\n self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)\n self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)\n self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))\n self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))\n\n self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)\n\n def forward(self, x):\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [\n self.branch3x3_2a(branch3x3),\n self.branch3x3_2b(branch3x3),\n ]\n branch3x3 = torch.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = torch.cat(branch3x3dbl, 1)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)\n self.conv1 = BasicConv2d(128, 768, kernel_size=5)\n self.conv1.stddev = 0.01\n self.fc = nn.Linear(768, num_classes)\n self.fc.stddev = 0.001\n\n def forward(self, x):\n # 17 x 17 x 768\n x = F.avg_pool2d(x, kernel_size=5, stride=3)\n # 5 x 5 x 768\n x = self.conv0(x)\n # 5 x 5 x 128\n x = self.conv1(x)\n # 1 x 1 x 768\n x = x.view(x.size(0), -1)\n # 768\n x = self.fc(x)\n # 1000\n return x\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n\n\ndef inception_v3(pretrained=True, **kwargs):\n if pretrained:\n 
if 'transform_input' not in kwargs:\n kwargs['transform_input'] = True\n model = Inception3(**kwargs)\n model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))\n return model\n\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.dropout",
"torch.cat",
"torch.utils.model_zoo.load_url",
"torch.nn.ModuleList",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"scipy.stats.truncnorm",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
]
] |
IvanPy96/Imagewoof-classification-PyTorch
|
[
"0055fcad8cc98d034331c7a72be43bee28fff37d"
] |
[
"inference.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models, transforms\n\nfrom sys import argv\nimport shutil\nimport requests\n\nfrom PIL import Image\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nscript, file_link = argv \n\nresponse = requests.get(file_link, stream=True)\nwith open('img_test.jpg', 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\ndel response\n \n\ntest_transforms = transforms.Compose([transforms.CenterCrop(320),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225])]\n )\n\ndevice = torch.device('cpu')\nmodel = models.resnet50(pretrained=False)\nnum_ftrs = model.fc.in_features\nmodel.fc = nn.Linear(num_ftrs, 10)\nmodel.load_state_dict(torch.load('imagewoof_ResNet50.pth', map_location=device))\nmodel.eval()\n\nclass_names = ['Shih-Tzu', 'Rhodesian ridgeback', 'Beagle', 'English foxhound', \n 'Border terrier', 'Australian terrier', 'Golden retriever', \n 'Old English sheepdog', 'Samoyed', 'Dingo']\n\n\nwith torch.no_grad():\n\n img = Image.open('img_test.jpg')\n img_t = test_transforms(img).unsqueeze(0)\n img_t = img_t.to(device)\n\n outputs = model(img_t)\n _, preds = torch.max(outputs, 1)\n prob = F.softmax(outputs, dim=1)\n top_p, top_class = prob.topk(1, dim = 1)\n\n print('Порода: {}\\nУверенность модели: {}%'.format(class_names[int(preds.cpu().numpy())], 100*round(top_p.detach().cpu().numpy().tolist()[0][0],2)))\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.max",
"torch.load",
"torch.nn.Linear",
"torch.no_grad",
"torch.device"
]
] |
imocanu/jagerml
|
[
"b0e953b9fefe4a21935a763d53015f6d85160c9f"
] |
[
"otherpy/distribute/simpleMNISTmodel.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef mnist_dataset(batch_size):\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n x_train = x_train / np.float32(255)\n y_train = y_train / np.float32(255)\n\n train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(60000).repeat().batch(batch_size)\n test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).shuffle(60000).repeat().batch(batch_size)\n return train_dataset, test_dataset\n\n\ndef mnist_dataset2(batch_size):\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n print(train_images.shape, test_images.shape)\n train_images = train_images[..., None]\n test_images = test_images[..., None]\n print(train_images.shape, test_images.shape)\n train_images = train_images / np.float32(255)\n test_images = test_images / np.float32(255)\n print(train_images.shape, test_images.shape)\n print(train_labels.shape, test_labels.shape)\n\n strategy = tf.distribute.MirroredStrategy()\n print(\"Nr of devices :\", strategy.num_replicas_in_sync)\n\n BUFFER_SIZE = len(train_images)\n BATCH_SIZE_PER_REPLICA = 64\n GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync\n\n train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(\n GLOBAL_BATCH_SIZE)\n test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE)\n\n return train_dataset, test_dataset\n\n\ndef build_compile_fit_cnn_model(train_dataset, test_dataset, epochs):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2D(32, 3, activation='relu'))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation='relu'))\n model.add(tf.keras.layers.Dense(10))\n\n class PrintLR(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n print('\\nLearning rate for epoch {} is {}'.format(epoch + 1,\n model.optimizer.lr.numpy()))\n\n model.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy']\n )\n\n checkpoint_dir = './training_checkpoints'\n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n\n callbacks = [\n tf.keras.callbacks.TensorBoard(log_dir='./logs'),\n tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix,\n save_weights_only=True),\n tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),\n PrintLR()\n ]\n\n history_metrics = model.fit(train_dataset,\n epochs=epochs,\n validation_data=test_dataset,\n steps_per_epoch=70,\n callbacks=callbacks)\n\n # Saving the model to a path on localhost.\n saved_model_path = \"/tmp/tf_save\"\n save_options = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')\n model.save(saved_model_path, options=save_options)\n\n another_strategy = tf.distribute.MirroredStrategy()\n with another_strategy.scope():\n load_options = tf.saved_model.LoadOptions(experimental_io_device='/job:localhost')\n loaded = tf.keras.models.load_model(saved_model_path, options=load_options)\n\n return history_metrics\n\n\ndef plot_acc(history_metrics):\n rows = 2\n cols = 2\n n = rows * cols\n fig, axes = plt.subplots(rows, cols, figsize=(8, 8))\n for i, metric in enumerate(history_metrics.history):\n r = i // cols\n c = i % cols\n ax = axes[r][c]\n ax.plot(history_metrics.epoch, history_metrics.history[metric])\n 
ax.set_title(metric)\n\n plt.show()\n\n\nstart = time.time()\nbatch_size = 64\ntrain_dataset, test_dataset = mnist_dataset2(batch_size)\n\nprint(\"[*]TimePoint :\", time.time()- start)\nepochs = 15\nhistory = build_compile_fit_cnn_model(train_dataset, test_dataset, epochs=epochs)\nprint(\"[*]TimePoint :\", time.time()- start)\nplot_acc(history)\n"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.load_model",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential",
"matplotlib.pyplot.subplots",
"tensorflow.saved_model.SaveOptions",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.saved_model.LoadOptions",
"tensorflow.keras.optimizers.Adam",
"numpy.float32",
"tensorflow.keras.callbacks.TensorBoard",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Flatten",
"tensorflow.distribute.MirroredStrategy"
]
] |
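
For reference: in the file above, `mnist_dataset2` creates a `tf.distribute.MirroredStrategy`, but `build_compile_fit_cnn_model` then builds and compiles the Keras model outside any `strategy.scope()`, so its variables are not actually mirrored across replicas (and the unused `mnist_dataset` helper rescales `y_train` by 255, which would break sparse-label training). A minimal sketch of the scoped pattern with the same toy CNN; the batch size and epoch count below are placeholders, not values from the repo.

```python
import numpy as np
import tensorflow as tf

# Load and normalize MNIST; labels stay as integers for
# SparseCategoricalCrossentropy.
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., None] / np.float32(255)

strategy = tf.distribute.MirroredStrategy()
global_batch = 64 * strategy.num_replicas_in_sync

train_ds = (
    tf.data.Dataset.from_tensor_slices((x_train, y_train))
    .shuffle(len(x_train))
    .batch(global_batch)
)

# Variables must be created inside strategy.scope() to be mirrored.
with strategy.scope():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(10),
    ])
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.Adam(),
        metrics=["accuracy"],
    )

model.fit(train_ds, epochs=1, steps_per_epoch=70)
```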
MrLIVB/BMSTU_Computational_Algorithms
|
[
"c893a5015dbbecd97ed63c848d67cbc9d1a249dc"
] |
[
"lab_05/integral.py"
] |
[
"from legendre import legendre\nimport seidel\nimport matrix\nimport numpy as np\nfrom math import sqrt, pi, e\n\n\ndef quadrature(k):\n if k % 2:\n return 0\n else:\n return 2 / (k + 1)\n\ndef integrate(a, b, n, f):\n l = legendre(n)\n\n A = np.zeros((n, n))\n B = np.zeros((n, 1))\n for k in range(n):\n row = []\n for i in range(n):\n A[k, i] = l[i] ** k\n B[k] = quadrature(k)\n\n \"\"\"D = np.linalg.inv(A)\n D = np.matrix(D)\n C = D * B\n C = C.transpose()\n Ai = np.array(C.ravel())\n Ai = nparray_to_list(Ai)[0]\"\"\"\n D = matrix.inv(A)\n Ai = matrix.multi(D, B)\n\n return (b - a) / 2 * sum(Ai[i] * f((b-a)/2 * l[i] + (a + b) / 2) for i in range(n))\n\n\n\ndef nparray_to_list(a):\n a = list(a)\n for i in range(len(a)):\n a[i] = list(a[i])\n return a"
] |
[
[
"numpy.zeros"
]
] |
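
The `integrate` function above derives Gauss-Legendre weights by solving a moment system built from the roots returned by `legendre(n)`. A quick cross-check sketch using NumPy's built-in nodes and weights; the function name below is illustrative and not part of the repo.

```python
import numpy as np

def gauss_legendre_integrate(a, b, n, f):
    """Integrate f over [a, b] with an n-point Gauss-Legendre rule."""
    # leggauss returns the roots of the degree-n Legendre polynomial and the
    # matching quadrature weights on the reference interval [-1, 1].
    nodes, weights = np.polynomial.legendre.leggauss(n)
    # Affine map from [-1, 1] to [a, b]; the Jacobian is (b - a) / 2.
    x = (b - a) / 2 * nodes + (a + b) / 2
    return (b - a) / 2 * np.sum(weights * f(x))

# Example: the integral of x^2 over [0, 1] is 1/3; a 3-point rule is exact here.
print(gauss_legendre_integrate(0.0, 1.0, 3, lambda x: x ** 2))
```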
obadao/multi-view-pose-estimation
|
[
"2d5ff2975180edc66628d1ba0fe0806ec177ef0c"
] |
[
"linear.py"
] |
[
"# Rahil Mehrizi, Oct 2018\n\"\"\"Creating Model to Predict 3D Poses from Multi-view 2D Joints\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom collections import OrderedDict\n\nclass LinearModel(nn.Module):\n def __init__(self, linear_size, num_joints):\n super(LinearModel, self).__init__()\n\n self.linear_size = linear_size\n self.num_joints = num_joints\n\n self.fc1 = nn.Linear(2*(self.num_joints), self.linear_size) \n self.bn1 = nn.BatchNorm1d(self.linear_size)\n\n self.fc2 = nn.Linear(self.linear_size, self.linear_size) \n self.bn2 = nn.BatchNorm1d(self.linear_size)\n\n self.fc3 = nn.Linear(self.linear_size, self.linear_size) \n self.bn3 = nn.BatchNorm1d(self.linear_size)\n\n self.fc4 = nn.Linear(self.linear_size, self.linear_size) \n self.bn4 = nn.BatchNorm1d(self.linear_size)\n\n self.fc5 = nn.Linear(self.linear_size, self.linear_size) \n self.bn5 = nn.BatchNorm1d(self.linear_size)\n\n self.fc6 = nn.Linear(self.linear_size, 3*self.num_joints) \n\n self.relu = nn.ReLU(inplace=True)\n self.drop = nn.Dropout() \n\n \"\"\"Kaiming Initialization\"\"\" \n for m in self.modules():\n if isinstance(m, nn.Linear):\n std1 = math.sqrt(2. / (m.in_features*m.out_features))\n m.weight.data.normal_(0,std1).clamp_(min=-2*std1,max=2*std1) \n std2 = math.sqrt(2. / (m.out_features))\n m.bias.data.normal_(0,std2).clamp_(min=-2*std2,max=2*std2) \n\n def forward(self, x):\n\n x = x.view(x.size(0), -1)\n\n x = self.fc1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.drop(x)\n \n #first block\n xin1 = x\n\n x = self.fc2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.drop(x)\n\n x = self.fc3(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.drop(x)\n\n x = x + xin1\n\n #second block\n xin2 = x\n\n x = self.fc4(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.drop(x)\n\n x = self.fc5(x)\n x = self.bn5(x)\n x = self.relu(x)\n x = self.drop(x)\n\n x = x + xin2\n\n x = self.fc6(x)\n\n x = x.view(x.size(0), self.num_joints, 3)\n \n return x\n\n\ndef MakeLinearModel(linear_size, num_joints):\n model = LinearModel(linear_size, num_joints) \n return model\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d"
]
] |
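
A hedged usage sketch for `MakeLinearModel` above, assuming `linear.py` is importable and using made-up sizes (17 joints, 1024 hidden units, batch of 8). The forward pass flattens its input, so a `(batch, num_joints, 2)` tensor of 2D joints is accepted directly.

```python
import torch
from linear import MakeLinearModel  # the module shown above

# Hypothetical sizes: 17 joints, 1024 hidden units.
model = MakeLinearModel(linear_size=1024, num_joints=17)
model.eval()  # BatchNorm1d uses running stats in eval mode

joints_2d = torch.randn(8, 17, 2)  # (batch, num_joints, 2), flattened inside forward()
with torch.no_grad():
    joints_3d = model(joints_2d)

print(joints_3d.shape)  # torch.Size([8, 17, 3])
```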
billsioros/RoughML
|
[
"b4a4a94f49b568c32e232187566698c1d216207a"
] |
[
"src/roughml/data/generators.py"
] |
[
"from abc import ABC, abstractmethod\n\nimport numpy as np\nimport sympy\nfrom scipy import stats\n\nfrom roughml.plot import as_grayscale_image\n\n\nclass SurfaceGenerator(ABC):\n def __init__(\n self, n_points, rms, skewness, kurtosis, corlength_x, corlength_y, alpha\n ):\n self.n_points = n_points\n self.rms = rms\n self.skewness = skewness\n self.kurtosis = kurtosis\n self.corlength_x = corlength_x\n self.corlength_y = corlength_y\n self.alpha = alpha\n\n self._mean = 0\n self._length = 0\n\n def __str__(self):\n return f\"{self.__class__.__name__}({self.n_points}, {self.rms}, {self.skewness}, {self.kurtosis}, {self.corlength_x}, {self.corlength_y}, {self.alpha})\"\n\n def __repr__(self):\n return f\"<{self}>\"\n\n def __call__(self, length):\n self._length = length\n\n return self\n\n def __len__(self):\n return self._length\n\n def __iter__(self):\n for _ in range(self._length):\n yield self.generate_surface()\n\n def sort(self, elements):\n indices = np.argsort(elements, axis=0)\n\n return elements[indices], indices\n\n @abstractmethod\n def autocorrelation(self, tx, ty):\n raise NotImplementedError\n\n def generate_surface(self):\n # 1st step: Generation of a Gaussian surface\n\n # Determine the autocorrelation function R(tx,ty)\n R = np.zeros((self.n_points, self.n_points))\n\n txmin = -self.n_points // 2\n txmax = self.n_points // 2\n\n tymin = -self.n_points // 2\n tymax = self.n_points // 2\n\n dtx = (txmax - txmin) // self.n_points\n dty = (tymax - tymin) // self.n_points\n\n for tx in range(txmin, txmax, dtx):\n for ty in range(tymin, tymax, dty):\n R[tx + txmax, ty + tymax] = self.autocorrelation(tx, ty)\n\n # According to the Wiener-Khinchine theorem FR is the power spectrum of the desired profile\n FR = np.fft.fft2(R, s=(self.n_points, self.n_points))\n AMPR = np.sqrt(dtx ** 2 + dty ** 2) * abs(FR)\n\n # 2nd step: Generate a white noise, normalize it and take its Fourier transform\n X = np.random.rand(self.n_points, self.n_points)\n aveX = np.mean(np.mean(X))\n\n dif2X = (X - aveX) ** 2\n stdX = np.sqrt(np.mean(np.mean(dif2X)))\n X = X / stdX\n\n XF = np.fft.fft2(X, s=(self.n_points, self.n_points))\n\n # 3nd step: Multiply the two Fourier transforms\n YF = XF * np.sqrt(AMPR)\n\n # 4th step: Perform the inverse Fourier transform of YF and get the desired surface\n zaf = np.fft.ifft2(YF, s=(self.n_points, self.n_points))\n z = np.real(zaf)\n\n avez = np.mean(np.mean(z))\n dif2z = (z - avez) ** 2\n stdz = np.sqrt(np.mean(np.mean(dif2z)))\n z = ((z - avez) * self.rms) / stdz\n\n # Define the fraction of the surface to be analysed\n xmin = 0\n xmax = self.n_points\n ymin = 0\n ymax = self.n_points\n z_gs = z[xmin:xmax, ymin:ymax]\n\n # 2nd step: Generation of a non-Gaussian noise NxN\n z_ngn = stats.pearson3.rvs(\n self.skewness,\n loc=self._mean,\n scale=self.rms,\n size=(self.n_points, self.n_points),\n )\n\n # as_grayscale_image(z_ngn)\n # 3rd step: Combination of z_gs with z_ngn to output a z_ms\n v_gs = z_gs.flatten(order=\"F\")\n v_ngn = z_ngn.flatten(order=\"F\")\n\n Igs = np.argsort(v_gs)\n\n vs_ngn = np.sort(v_ngn)\n\n v_ngs = np.zeros_like(vs_ngn)\n v_ngs[Igs] = vs_ngn\n\n z_ngs = np.asmatrix(v_ngs.reshape(self.n_points, self.n_points, order=\"F\")).H\n\n return z_ngs\n\n\nclass NonGaussianSurfaceGenerator(SurfaceGenerator):\n def __init__(\n self,\n n_points=128,\n rms=1,\n skewness=0,\n kurtosis=3,\n corlength_x=4,\n corlength_y=4,\n alpha=1,\n ):\n super().__init__(\n n_points, rms, skewness, kurtosis, corlength_x, corlength_y, alpha\n )\n\n def 
autocorrelation(self, tx, ty):\n return (self.rms ** 2) * np.exp(\n -(\n (\n abs(\n np.sqrt(\n (tx / self.corlength_x) ** 2 + (ty / self.corlength_y) ** 2\n )\n )\n )\n ** (2 * self.alpha)\n )\n )\n\n\nclass BesselNonGaussianSurfaceGenerator(NonGaussianSurfaceGenerator):\n def __init__(\n self,\n n_points=128,\n rms=1,\n skewness=0,\n kurtosis=3,\n corlength_x=4,\n corlength_y=4,\n alpha=1,\n beta_x=1,\n beta_y=1,\n ):\n super().__init__(\n n_points, rms, skewness, kurtosis, corlength_x, corlength_y, alpha\n )\n\n self.beta_x, self.beta_y = beta_x, beta_y\n\n def autocorrelation(self, tx, ty):\n return super().autocorrelation(tx, ty) * sympy.besselj(\n 0, (2 * np.pi * np.sqrt((tx / self.beta_x) ** 2 + (ty / self.beta_y) ** 2))\n )\n\n\nif __name__ == \"__main__\":\n generate = NonGaussianSurfaceGenerator()\n\n for surface in generate(1):\n as_grayscale_image(surface)\n # as_3d_surface(surface)\n # plot_correlation(surface)\n\n besel_generate = BesselNonGaussianSurfaceGenerator(\n 128, 1, 0, 3, 16, 16, 0.5, 4000, 4000\n )\n\n for surface in besel_generate(1):\n as_grayscale_image(surface)\n # as_3d_surface(surface)\n # plot_correlation(surface)\n"
] |
[
[
"numpy.fft.fft2",
"numpy.fft.ifft2",
"numpy.sqrt",
"numpy.sort",
"numpy.real",
"scipy.stats.pearson3.rvs",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.mean",
"numpy.argsort",
"numpy.zeros"
]
] |
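
`NonGaussianSurfaceGenerator` above is an iterable factory: calling the instance with a length sets how many surfaces its iterator yields. A small usage sketch, assuming the `roughml` package is importable, that checks the sample RMS of a generated surface against the requested `rms=1`.

```python
import numpy as np
from roughml.data.generators import NonGaussianSurfaceGenerator

# Hypothetical parameters: 128x128 grid, unit RMS, Gaussian height distribution.
generate = NonGaussianSurfaceGenerator(n_points=128, rms=1, skewness=0, kurtosis=3)

for surface in generate(2):  # __call__ sets the length, __iter__ yields surfaces
    heights = np.asarray(surface)
    # The sample RMS should come out roughly equal to the requested rms=1.
    print(heights.shape, float(np.sqrt(np.mean(heights ** 2))))
```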
JelleAalbers/hypney
|
[
"3e38e21743fc9babe0ed47af299d08242a9b6d32",
"3e38e21743fc9babe0ed47af299d08242a9b6d32"
] |
[
"tests/models/test_combinations.py",
"hypney/basics.py"
] |
[
"import hypney\n\nimport numpy as np\nfrom scipy import stats\n\n\ndef test_mixture():\n m1 = hypney.models.uniform(rate=40)\n m2_free = hypney.models.uniform(rate=20)\n m2_frozen = m2_free.freeze()\n m3 = hypney.models.uniform(rate=30)\n\n for m2 in m2_free, m2_frozen:\n mix = hypney.models.mixture(m1, m2)\n assert mix.rate() == 60.0\n\n assert mix.pdf(data=0) == 1.0\n assert mix.diff_rate(data=0) == 60.0\n\n np.testing.assert_array_equal(\n mix.cdf(data=[0.0, 0.5, 1.0]), np.array([0.0, 0.5, 1.0])\n )\n\n assert mix.simulate().shape[0] > 0\n assert mix.rvs(size=50).shape[0] > 0\n\n # Test forming mixtures by +\n mix2 = m1 + m2\n assert mix2.diff_rate(data=0) == 60.0\n\n mix3 = m3 + mix2\n assert mix3.diff_rate(data=0) == 90.0\n assert len(mix3.models) == 3, \"Should unpack mixtures\"\n mix4 = m1 + m2 + m3\n assert mix4.diff_rate(data=0) == 90.0\n\n # Test mean and std\n mix = hypney.models.norm() + hypney.models.uniform(loc=5, scale=2)\n data = mix.rvs(100_000)\n np.testing.assert_allclose(mix.mean(), data.mean(), rtol=0.05)\n np.testing.assert_allclose(mix.std(), data.std(), rtol=0.05)\n\n # Test parameter after renaming\n mix = m1 + m2_free\n assert mix.rate(params=dict(m0_rate=1)) == 21.0\n\n mix = m1 + m2_frozen\n assert mix.rate(params=dict(rate=1)) == 21.0\n\n m2 = m2_free\n\n # Test parameter sharing\n m_shared = hypney.models.mixture(m1, m2, m3, share=\"scale\")\n assert \"scale\" in m_shared.param_names\n assert \"scale_0\" not in m_shared.param_names\n assert \"scale_1\" not in m_shared.param_names\n assert \"scale_2\" not in m_shared.param_names\n assert m_shared(scale=2).pdf(2) == 0.5 * (m1 + m2 + m3).pdf(1)\n\n # Test vectorization\n m = hypney.models.mixture(\n hypney.models.norm(), hypney.models.norm().shift(1), share=\"loc\"\n )\n locs = np.linspace(0, 2, 10)\n np.testing.assert_almost_equal(m.pdf(0, loc=locs), [m.pdf(0, loc=x) for x in locs])\n\n\ndef test_tensor_product():\n m1 = hypney.models.uniform(rate=40)\n m2 = hypney.models.uniform(rate=20)\n m3 = hypney.models.uniform(rate=30)\n\n prod = m1 ** m2 ** m3\n\n data = np.array([[0, 0, 0], [1, 1, 1]])\n\n np.testing.assert_array_equal(prod.pdf(data=data), np.array([1, 1]))\n\n np.testing.assert_array_equal(\n prod.logpdf(data=data), stats.uniform().logpdf(0) ** 3\n )\n\n assert prod.rate() == 40.0\n data = prod.simulate()\n assert data.shape[0] > 0\n assert data.shape[1] == 3\n",
"import typing as ty\n\nimport numpy as np\nimport hypney as hp\n\n\nexport, __all__ = hp.exporter(\n also_export=[\n \"DEFAULT_RATE_PARAM\",\n \"DEFAULT_LOC_PARAM\",\n \"DEFAULT_SCALE_PARAM\",\n \"DEFAULT_OBSERVABLE\",\n \"RATE_LOC_PARAMS\",\n \"RATE_LOC_SCALE_PARAMS\",\n \"DEFAULT_CUT_TYPE\",\n \"DEFAULT_RATE_GRID\",\n ]\n)\n\n\n@export\nclass NotChanged:\n \"\"\"Default argument used where None would be ambiguous or unclear\n\n (for example, would data=None set data to None, or keep data unchanged?)\n \"\"\"\n\n pass\n\n\n@export\nclass Parameter(ty.NamedTuple):\n \"\"\"Description of a parameter: name, default, and limits\"\"\"\n\n name: str\n default: float = 0.0\n min: float = -float(\"inf\")\n max: float = float(\"inf\")\n share: bool = False # Should param be shared when building mixtures?\n anchors: tuple = tuple() # Values at which model is most accurate\n\n\nDEFAULT_RATE_PARAM = Parameter(name=\"rate\", min=0, max=float(\"inf\"), default=1.0)\nDEFAULT_LOC_PARAM = Parameter(name=\"loc\", min=-float(\"inf\"))\nDEFAULT_SCALE_PARAM = Parameter(name=\"scale\", min=0, max=float(\"inf\"), default=1.0)\n\nRATE_LOC_PARAMS = (DEFAULT_RATE_PARAM, DEFAULT_LOC_PARAM)\nRATE_LOC_SCALE_PARAMS = RATE_LOC_PARAMS + (DEFAULT_SCALE_PARAM,)\n\n# open, halfopen, or closed\nDEFAULT_CUT_TYPE = \"halfopen\"\n\n\n@export\nclass Observable(ty.NamedTuple):\n \"\"\"Description of a observable space: name and limits\"\"\"\n\n name: str\n min: float = -float(\"inf\")\n max: float = float(\"inf\")\n # Whether only integer values are allowed\n integer: bool = False\n\n\nDEFAULT_OBSERVABLE = Observable(name=\"x\", min=-float(\"inf\"), max=float(\"inf\"))\n\n\n##\n# Create a sensible anchor/interpolation grid for the rate parameter\n# < 100.\n##\n\n\n@export\ndef make_rate_grid(max_mu=1200):\n # Start with 0.1 - 2, with 0.1 steps\n _q = np.arange(0.1, 2.1, 0.1).tolist()\n # Advance by 5% each step until 1200, i.e. +~6 sigma if true signal is 1000.\n while _q[-1] < max_mu:\n _q.append(_q[-1] * 1.05)\n # Round to one decimal, and at most three significant figures,\n # so results don't appear unreasonably precise\n return np.unique(np.round([float(\"%.3g\" % x) for x in _q], decimals=1))\n\n\nDEFAULT_RATE_GRID = make_rate_grid()\n# Prevent accidental clobbering later\nDEFAULT_RATE_GRID.flags.writeable = False\n"
] |
[
[
"numpy.array",
"scipy.stats.uniform",
"numpy.linspace"
],
[
"numpy.arange"
]
] |
MetaMain/BARZ
|
[
"458466e42ada076c4fb448098768b32356df0259"
] |
[
"BarrierZoneTrainer/BarrierZoneTrainer/BarrierZoneDefenseRandomized.py"
] |
[
"#This is the randomized version of the BARZ defense \r\nimport numpy\r\nimport DataManagerPytorch as DMP\r\n\r\nclass BarrierZoneDefenseRandomized():\r\n #Default constructor \r\n def __init__(self , modelPlusList, classNum, modelsPerEval):\r\n self.ModelPlusList=modelPlusList\r\n self.ClassNum=classNum\r\n self.ModelNum=len(self.ModelPlusList)\r\n self.Threshold=modelsPerEval\r\n self.ModelIndexList = list(range(0, self.ModelNum))\r\n\r\n #Majority voting AND thresholding \r\n def predictD(self, dataLoader, numClasses):\r\n #basic error checking\r\n if numClasses != self.ClassNum:\r\n raise ValueError(\"Class numbers don't match for BARZ defense\")\r\n sampleSize = len(dataLoader.dataset) \r\n\r\n #Randomized the list\r\n random.shuffle(self.ModelIndexList)\r\n\r\n modelVotes=numpy.zeros((self.ModelsPerEval, sampleSize, self.ClassNum)) \r\n #Get the votes for each of the networks \r\n for i in range(0, self.ModelsPerEval):\r\n modelIndex = self.ModelIndexList[i]\r\n print(\"Evaluating on model:\", self.ModelPlusList[modelIndex].modelName)\r\n modelVotes[i,:,:]=self.ModelPlusList[modelIndex].predictD(dataLoader, self.ClassNum)\r\n \r\n #Now do the voting \r\n finalVotes=numpy.zeros((sampleSize,self.ClassNum+1)) #The (n+1)th class is the noise class\r\n for i in range(0, sampleSize):\r\n currentTally=numpy.zeros((self.ClassNum,))\r\n for j in range(0, self.ModelNum):\r\n currentVote=modelVotes[j,i,:].argmax(axis=0)\r\n currentTally[currentVote]=currentTally[currentVote]+1\r\n if (currentTally[currentTally.argmax(axis=0)]>=self.Threshold): #Make sure it is above the threshold \r\n finalVotes[i,currentTally.argmax(axis=0)]=1.0\r\n else: #Make it the last \"noise\" class\r\n finalVotes[i,self.ClassNum]=1.0\r\n return finalVotes\r\n\r\n def validateD(self, dataLoader):\r\n accuracy=0\r\n sampleSize=len(dataLoader.dataset) \r\n multiModelOutput=self.predictD(dataLoader, self.ClassNum)\r\n xTest, yTest = DMP.DataLoaderToTensor(dataLoader)\r\n for i in range(0, sampleSize):\r\n if(multiModelOutput[i].argmax(axis=0)==yTest[i]):\r\n accuracy=accuracy+1\r\n accuracy=accuracy/sampleSize\r\n return accuracy\r\n\r\n #the network is fooled if we don't have a noise class label AND it gets the wrong label \r\n #Returns attack success rate \r\n def evaluateAdversarialAttackSuccessRate(self, advLoader):\r\n yPred = self.predictD(advLoader, self.ClassNum)\r\n xAdv, yCleanSingleVal = DMP.DataLoaderToTensor(advLoader)\r\n advAcc=0\r\n for i in range(0, xAdv.shape[0]):\r\n #The attack wins only if we don't correctly label the sample AND the sample isn't given the nosie class label\r\n if yPred[i].argmax(axis=0) != self.ClassNum and yPred[i].argmax(axis=0) != yCleanSingleVal[i]: #The last class is the noise class\r\n advAcc=advAcc+1\r\n advAcc=advAcc/ float(xAdv.shape[0])\r\n return advAcc\r\n\r\n"
] |
[
[
"numpy.zeros"
]
] |
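
Note that the stored file calls `random.shuffle` without importing `random` and reads `self.ModelsPerEval`, which the constructor never sets (the value is stored as `self.Threshold`), so it would fail as written. A standalone sketch of the same vote-and-threshold idea on plain NumPy arrays, with hypothetical shapes; it is an illustration, not the repo's class.

```python
import numpy as np

def barrier_zone_vote(model_probs, threshold):
    """Majority vote with an extra 'noise' class when agreement is too low.

    model_probs: (num_models, num_samples, num_classes) per-model class scores.
    threshold:   minimum number of models that must agree on the winning class.
    """
    num_models, num_samples, num_classes = model_probs.shape
    votes = model_probs.argmax(axis=2)                # (num_models, num_samples)
    final = np.zeros((num_samples, num_classes + 1))  # last column = noise class
    for i in range(num_samples):
        tally = np.bincount(votes[:, i], minlength=num_classes)
        winner = tally.argmax()
        if tally[winner] >= threshold:
            final[i, winner] = 1.0
        else:
            final[i, num_classes] = 1.0               # abstain into the noise class
    return final

# Example: 3 models, 2 samples, 4 classes, require at least 2 agreeing models.
probs = np.random.rand(3, 2, 4)
print(barrier_zone_vote(probs, threshold=2).argmax(axis=1))
```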
agemagician/NeMo
|
[
"5839aee402f314aa413b28e9042b1e1cac10a114"
] |
[
"nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py"
] |
[
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport json\nimport random\nfrom multiprocessing import Value\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.utils.data as pt_data\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom sacrebleu import corpus_bleu\n\nfrom nemo.collections.common.data import ConcatDataset\nfrom nemo.collections.common.losses import NLLLoss, SmoothedCrossEntropyLoss\nfrom nemo.collections.common.metrics import GlobalAverageLossMetric\nfrom nemo.collections.common.parts import transformer_weights_init\nfrom nemo.collections.common.tokenizers.chinese_tokenizers import ChineseProcessor\nfrom nemo.collections.common.tokenizers.en_ja_tokenizers import EnJaProcessor\nfrom nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor\nfrom nemo.collections.nlp.data import TarredTranslationDataset, TranslationDataset\nfrom nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModel\nfrom nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig\nfrom nemo.collections.nlp.modules.common import TokenClassifier\nfrom nemo.collections.nlp.modules.common.lm_utils import get_transformer\nfrom nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer\nfrom nemo.collections.nlp.modules.common.transformer import BeamSearchSequenceGenerator, TopKSequenceGenerator\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.utils import logging, model_utils\n\n__all__ = ['MTEncDecModel']\n\n\nclass MTEncDecModel(EncDecNLPModel):\n \"\"\"\n Encoder-decoder machine translation model.\n \"\"\"\n\n def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None):\n cfg = model_utils.convert_model_config_to_dict_config(cfg)\n # Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable\n # Global_rank and local_rank is set by LightningModule in Lightning 1.2.0\n\n self.world_size = 1\n if trainer is not None:\n self.world_size = trainer.num_nodes * trainer.num_gpus\n\n cfg = model_utils.maybe_update_config_version(cfg)\n\n self.src_language = cfg.get(\"src_language\", None)\n self.tgt_language = cfg.get(\"tgt_language\", None)\n\n self.multilingual = cfg.get(\"multilingual\", False)\n self.multilingual_ids = []\n\n # Instantiates tokenizers and register to be saved with NeMo Model archive\n # After this call, ther will be self.encoder_tokenizer and self.decoder_tokenizer\n # Which can convert between tokens and token_ids for SRC and TGT languages correspondingly.\n self.setup_enc_dec_tokenizers(\n encoder_tokenizer_library=cfg.encoder_tokenizer.get('library', 'yttm'),\n encoder_tokenizer_model=cfg.encoder_tokenizer.get('tokenizer_model'),\n 
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),\n encoder_model_name=cfg.encoder.get('model_name') if hasattr(cfg.encoder, 'model_name') else None,\n decoder_tokenizer_library=cfg.decoder_tokenizer.get('library', 'yttm'),\n decoder_tokenizer_model=cfg.decoder_tokenizer.tokenizer_model,\n decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),\n decoder_model_name=cfg.decoder.get('model_name') if hasattr(cfg.decoder, 'model_name') else None,\n )\n\n if self.multilingual:\n if isinstance(self.src_language, ListConfig) and isinstance(self.tgt_language, ListConfig):\n raise ValueError(\n \"cfg.src_language and cfg.tgt_language cannot both be lists. We only support many-to-one or one-to-many multilingual models.\"\n )\n elif isinstance(self.src_language, ListConfig):\n for lng in self.src_language:\n self.multilingual_ids.append(self.encoder_tokenizer.token_to_id(\"<\" + lng + \">\"))\n elif isinstance(self.tgt_language, ListConfig):\n for lng in self.tgt_language:\n self.multilingual_ids.append(self.encoder_tokenizer.token_to_id(\"<\" + lng + \">\"))\n else:\n raise ValueError(\n \"Expect either cfg.src_language or cfg.tgt_language to be a list when multilingual=True.\"\n )\n\n if isinstance(self.src_language, ListConfig):\n self.tgt_language = [self.tgt_language] * len(self.src_language)\n else:\n self.src_language = [self.src_language] * len(self.tgt_language)\n\n self.source_processor_list = []\n self.target_processor_list = []\n for src_lng, tgt_lng in zip(self.src_language, self.tgt_language):\n src_prcsr, tgt_prscr = self.setup_pre_and_post_processing_utils(\n source_lang=src_lng, target_lang=tgt_lng\n )\n self.source_processor_list.append(src_prcsr)\n self.target_processor_list.append(tgt_prscr)\n\n else:\n # After this call, the model will have self.source_processor and self.target_processor objects\n self.setup_pre_and_post_processing_utils(source_lang=self.src_language, target_lang=self.tgt_language)\n self.multilingual_ids = [None]\n\n # TODO: Why is this base constructor call so late in the game?\n super().__init__(cfg=cfg, trainer=trainer)\n\n # encoder from NeMo, Megatron-LM, or HuggingFace\n encoder_cfg_dict = OmegaConf.to_container(cfg.get('encoder'))\n encoder_cfg_dict['vocab_size'] = self.encoder_vocab_size\n library = encoder_cfg_dict.pop('library', 'nemo')\n model_name = encoder_cfg_dict.pop('model_name', None)\n pretrained = encoder_cfg_dict.pop('pretrained', False)\n self.encoder = get_transformer(\n library=library,\n model_name=model_name,\n pretrained=pretrained,\n config_dict=encoder_cfg_dict,\n encoder=True,\n pre_ln_final_layer_norm=encoder_cfg_dict.get('pre_ln_final_layer_norm', False),\n )\n\n # decoder from NeMo, Megatron-LM, or HuggingFace\n decoder_cfg_dict = OmegaConf.to_container(cfg.get('decoder'))\n decoder_cfg_dict['vocab_size'] = self.decoder_vocab_size\n library = decoder_cfg_dict.pop('library', 'nemo')\n model_name = decoder_cfg_dict.pop('model_name', None)\n pretrained = decoder_cfg_dict.pop('pretrained', False)\n decoder_cfg_dict['hidden_size'] = self.encoder.hidden_size\n self.decoder = get_transformer(\n library=library,\n model_name=model_name,\n pretrained=pretrained,\n config_dict=decoder_cfg_dict,\n encoder=False,\n pre_ln_final_layer_norm=decoder_cfg_dict.get('pre_ln_final_layer_norm', False),\n )\n\n self.log_softmax = TokenClassifier(\n hidden_size=self.decoder.hidden_size,\n num_classes=self.decoder_vocab_size,\n activation=cfg.head.activation,\n log_softmax=cfg.head.log_softmax,\n dropout=cfg.head.dropout,\n 
use_transformer_init=cfg.head.use_transformer_init,\n )\n\n self.beam_search = BeamSearchSequenceGenerator(\n embedding=self.decoder.embedding,\n decoder=self.decoder.decoder,\n log_softmax=self.log_softmax,\n max_sequence_length=self.decoder.max_sequence_length,\n beam_size=cfg.beam_size,\n bos=self.decoder_tokenizer.bos_id,\n pad=self.decoder_tokenizer.pad_id,\n eos=self.decoder_tokenizer.eos_id,\n len_pen=cfg.len_pen,\n max_delta_length=cfg.max_generation_delta,\n )\n\n # tie weights of embedding and softmax matrices\n self.log_softmax.mlp.layer0.weight = self.decoder.embedding.token_embedding.weight\n\n # TODO: encoder and decoder with different hidden size?\n std_init_range = 1 / self.encoder.hidden_size ** 0.5\n\n # initialize weights if not using pretrained encoder/decoder\n if not self._cfg.encoder.get('pretrained', False):\n self.encoder.apply(lambda module: transformer_weights_init(module, std_init_range))\n\n if not self._cfg.decoder.get('pretrained', False):\n self.decoder.apply(lambda module: transformer_weights_init(module, std_init_range))\n\n self.log_softmax.apply(lambda module: transformer_weights_init(module, std_init_range))\n\n self.loss_fn = SmoothedCrossEntropyLoss(\n pad_id=self.decoder_tokenizer.pad_id, label_smoothing=cfg.label_smoothing\n )\n self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id)\n\n def filter_predicted_ids(self, ids):\n ids[ids >= self.decoder_tokenizer.vocab_size] = self.decoder_tokenizer.unk_id\n return ids\n\n @typecheck()\n def forward(self, src, src_mask, tgt, tgt_mask):\n src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)\n tgt_hiddens = self.decoder(\n input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=src_hiddens, encoder_mask=src_mask\n )\n log_probs = self.log_softmax(hidden_states=tgt_hiddens)\n return log_probs\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop with the data from the training dataloader\n passed in as `batch`.\n \"\"\"\n # forward pass\n for i in range(len(batch)):\n if batch[i].ndim == 3:\n # Dataset returns already batched data and the first dimension of size 1 added by DataLoader\n # is excess.\n batch[i] = batch[i].squeeze(dim=0)\n src_ids, src_mask, tgt_ids, tgt_mask, labels = batch\n log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)\n train_loss = self.loss_fn(log_probs=log_probs, labels=labels)\n tensorboard_logs = {\n 'train_loss': train_loss,\n 'lr': self._optimizer.param_groups[0]['lr'],\n }\n return {'loss': train_loss, 'log': tensorboard_logs}\n\n def eval_step(self, batch, batch_idx, mode, dataloader_idx=0):\n for i in range(len(batch)):\n if batch[i].ndim == 3:\n # Dataset returns already batched data and the first dimension of size 1 added by DataLoader\n # is excess.\n batch[i] = batch[i].squeeze(dim=0)\n\n if self.multilingual:\n self.source_processor = self.source_processor_list[dataloader_idx]\n self.target_processor = self.target_processor_list[dataloader_idx]\n\n src_ids, src_mask, tgt_ids, tgt_mask, labels = batch\n log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)\n eval_loss = self.eval_loss_fn(log_probs=log_probs, labels=labels)\n # this will run encoder twice -- TODO: potentially fix\n _, translations = self.batch_translate(src=src_ids, src_mask=src_mask)\n if dataloader_idx == 0:\n getattr(self, f'{mode}_loss')(loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1])\n else:\n getattr(self, f'{mode}_loss_{dataloader_idx}')(\n loss=eval_loss, 
num_measurements=log_probs.shape[0] * log_probs.shape[1]\n )\n np_tgt = tgt_ids.detach().cpu().numpy()\n ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]\n ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]\n num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()\n return {\n 'translations': translations,\n 'ground_truths': ground_truths,\n 'num_non_pad_tokens': num_non_pad_tokens,\n }\n\n def test_step(self, batch, batch_idx, dataloader_idx=0):\n return self.eval_step(batch, batch_idx, 'test', dataloader_idx)\n\n @rank_zero_only\n def log_param_stats(self):\n for name, p in self.named_parameters():\n if p.requires_grad:\n self.trainer.logger.experiment.add_histogram(name + '_hist', p, global_step=self.global_step)\n self.trainer.logger.experiment.add_scalars(\n name,\n {'mean': p.mean(), 'stddev': p.std(), 'max': p.max(), 'min': p.min()},\n global_step=self.global_step,\n )\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n \"\"\"\n Lightning calls this inside the validation loop with the data from the validation dataloader\n passed in as `batch`.\n \"\"\"\n return self.eval_step(batch, batch_idx, 'val', dataloader_idx)\n\n def eval_epoch_end(self, outputs, mode):\n # if user specifies one validation dataloader, then PTL reverts to giving a list of dictionary instead of a list of list of dictionary\n if isinstance(outputs[0], dict):\n outputs = [outputs]\n\n loss_list = []\n sb_score_list = []\n for dataloader_idx, output in enumerate(outputs):\n if dataloader_idx == 0:\n eval_loss = getattr(self, f'{mode}_loss').compute()\n else:\n eval_loss = getattr(self, f'{mode}_loss_{dataloader_idx}').compute()\n\n translations = list(itertools.chain(*[x['translations'] for x in output]))\n ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))\n assert len(translations) == len(ground_truths)\n\n # Gather translations and ground truths from all workers\n tr_and_gt = [None for _ in range(self.world_size)]\n # we also need to drop pairs where ground truth is an empty string\n dist.all_gather_object(\n tr_and_gt, [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != '']\n )\n if self.global_rank == 0:\n _translations = []\n _ground_truths = []\n for rank in range(0, self.world_size):\n _translations += [t for (t, g) in tr_and_gt[rank]]\n _ground_truths += [g for (t, g) in tr_and_gt[rank]]\n\n if self.tgt_language in ['ja']:\n sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize=\"ja-mecab\")\n elif self.tgt_language in ['zh']:\n sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize=\"zh\")\n else:\n sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize=\"13a\")\n\n # because the reduction op later is average (over word_size)\n sb_score = sacre_bleu.score * self.world_size\n\n dataset_name = \"Validation\" if mode == 'val' else \"Test\"\n logging.info(\n f\"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Set size: {len(translations)}\"\n )\n logging.info(\n f\"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Val Loss = {eval_loss}\"\n )\n logging.info(\n f\"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Sacre BLEU = {sb_score / self.world_size}\"\n )\n logging.info(\n f\"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Translation Examples:\"\n )\n for i in range(0, 3):\n ind = random.randint(0, len(translations) - 1)\n logging.info(\" \" + 
'\\u0332'.join(f\"Example {i}:\"))\n logging.info(f\" Prediction: {translations[ind]}\")\n logging.info(f\" Ground Truth: {ground_truths[ind]}\")\n else:\n sb_score = 0.0\n\n loss_list.append(eval_loss.cpu().numpy())\n sb_score_list.append(sb_score)\n if dataloader_idx == 0:\n self.log(f\"{mode}_loss\", eval_loss, sync_dist=True)\n self.log(f\"{mode}_sacreBLEU\", sb_score, sync_dist=True)\n getattr(self, f'{mode}_loss').reset()\n else:\n self.log(f\"{mode}_loss_dl_index_{dataloader_idx}\", eval_loss, sync_dist=True)\n self.log(f\"{mode}_sacreBLEU_dl_index_{dataloader_idx}\", sb_score, sync_dist=True)\n getattr(self, f'{mode}_loss_{dataloader_idx}').reset()\n\n if len(loss_list) > 1:\n self.log(f\"{mode}_loss_avg\", np.mean(loss_list), sync_dist=True)\n self.log(f\"{mode}_sacreBLEU_avg\", np.mean(sb_score_list), sync_dist=True)\n\n def validation_epoch_end(self, outputs):\n \"\"\"\n Called at the end of validation to aggregate outputs.\n :param outputs: list of individual outputs of each validation step.\n \"\"\"\n self.eval_epoch_end(outputs, 'val')\n\n def test_epoch_end(self, outputs):\n self.eval_epoch_end(outputs, 'test')\n\n def setup_enc_dec_tokenizers(\n self,\n encoder_tokenizer_library=None,\n encoder_tokenizer_model=None,\n encoder_bpe_dropout=0.0,\n encoder_model_name=None,\n decoder_tokenizer_library=None,\n decoder_tokenizer_model=None,\n decoder_bpe_dropout=0.0,\n decoder_model_name=None,\n ):\n\n supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece']\n if (\n encoder_tokenizer_library not in supported_tokenizers\n or decoder_tokenizer_library not in supported_tokenizers\n ):\n raise NotImplementedError(f\"Currently we only support tokenizers in {supported_tokenizers}.\")\n\n self.encoder_tokenizer = get_nmt_tokenizer(\n library=encoder_tokenizer_library,\n tokenizer_model=self.register_artifact(\"encoder_tokenizer.tokenizer_model\", encoder_tokenizer_model),\n bpe_dropout=encoder_bpe_dropout,\n model_name=encoder_model_name,\n vocab_file=None,\n special_tokens=None,\n use_fast=False,\n )\n self.decoder_tokenizer = get_nmt_tokenizer(\n library=decoder_tokenizer_library,\n tokenizer_model=self.register_artifact(\"decoder_tokenizer.tokenizer_model\", decoder_tokenizer_model),\n bpe_dropout=decoder_bpe_dropout,\n model_name=decoder_model_name,\n vocab_file=None,\n special_tokens=None,\n use_fast=False,\n )\n\n def setup_training_data(self, train_data_config: Optional[DictConfig]):\n self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)\n\n def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):\n self.setup_validation_data(self._cfg.get('validation_ds'))\n\n def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):\n self.setup_test_data(self._cfg.get('test_ds'))\n\n def setup_validation_data(self, val_data_config: Optional[DictConfig]):\n self._validation_dl = self._setup_eval_dataloader_from_config(cfg=val_data_config)\n # instantiate Torchmetric for each val dataloader\n if self._validation_dl is not None:\n for dataloader_idx in range(len(self._validation_dl)):\n if dataloader_idx == 0:\n setattr(\n self, f'val_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),\n )\n else:\n setattr(\n self,\n f'val_loss_{dataloader_idx}',\n GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),\n )\n\n def setup_test_data(self, test_data_config: Optional[DictConfig]):\n self._test_dl = self._setup_eval_dataloader_from_config(cfg=test_data_config)\n # instantiate 
Torchmetric for each test dataloader\n if self._test_dl is not None:\n for dataloader_idx in range(len(self._test_dl)):\n if dataloader_idx == 0:\n setattr(\n self, f'test_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),\n )\n else:\n setattr(\n self,\n f'test_loss_{dataloader_idx}',\n GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),\n )\n\n def _setup_dataloader_from_config(self, cfg: DictConfig):\n if cfg.get(\"use_tarred_dataset\", False):\n if cfg.get(\"metadata_file\") is None:\n raise FileNotFoundError(\"Trying to use tarred data set but could not find metadata path in config.\")\n else:\n if not self.multilingual:\n metadata_file_list = [cfg.get('metadata_file')]\n else:\n metadata_file_list = cfg.get('metadata_file')\n\n datasets = []\n for idx, metadata_file in enumerate(metadata_file_list):\n with open(metadata_file) as metadata_reader:\n metadata = json.load(metadata_reader)\n if cfg.get('tar_files') is None:\n tar_files = metadata.get('tar_files')\n if tar_files is not None:\n logging.info(f'Loading from tarred dataset {tar_files}')\n else:\n raise FileNotFoundError(\"Could not find tarred dataset in config or metadata.\")\n else:\n tar_files = cfg.get('tar_files')\n if self.multilingual:\n tar_files = tar_files[idx]\n if metadata.get('tar_files') is not None:\n logging.info(\n f'Tar file paths found in both cfg and metadata using one in cfg by default - {tar_files}'\n )\n\n dataset = TarredTranslationDataset(\n text_tar_filepaths=tar_files,\n metadata_path=metadata_file,\n encoder_tokenizer=self.encoder_tokenizer,\n decoder_tokenizer=self.decoder_tokenizer,\n shuffle_n=cfg.get(\"tar_shuffle_n\", 100),\n shard_strategy=cfg.get(\"shard_strategy\", \"scatter\"),\n global_rank=self.global_rank,\n world_size=self.world_size,\n reverse_lang_direction=cfg.get(\"reverse_lang_direction\", False),\n prepend_id=self.multilingual_ids[idx],\n )\n datasets.append(dataset)\n\n if self.multilingual:\n dataset = ConcatDataset(\n datasets=datasets,\n sampling_technique=cfg.get('concat_sampling_technique'),\n sampling_temperature=cfg.get('concat_sampling_temperature'),\n sampling_probabilities=cfg.get('concat_sampling_probabilities'),\n global_rank=self.global_rank,\n world_size=self.world_size,\n )\n else:\n dataset = datasets[0]\n\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=1,\n num_workers=cfg.get(\"num_workers\", 2),\n pin_memory=cfg.get(\"pin_memory\", False),\n drop_last=cfg.get(\"drop_last\", False),\n )\n else:\n if not self.multilingual:\n src_file_list = [cfg.src_file_name]\n tgt_file_list = [cfg.tgt_file_name]\n else:\n src_file_list = cfg.src_file_name\n tgt_file_list = cfg.tgt_file_name\n\n if len(src_file_list) != len(tgt_file_list):\n raise ValueError(\n 'The same number of filepaths must be passed in for source and target while training multilingual.'\n )\n\n datasets = []\n for idx, src_file in enumerate(src_file_list):\n dataset = TranslationDataset(\n dataset_src=str(Path(src_file).expanduser()),\n dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),\n tokens_in_batch=cfg.tokens_in_batch,\n clean=cfg.get(\"clean\", False),\n max_seq_length=cfg.get(\"max_seq_length\", 512),\n min_seq_length=cfg.get(\"min_seq_length\", 1),\n max_seq_length_diff=cfg.get(\"max_seq_length_diff\", 512),\n max_seq_length_ratio=cfg.get(\"max_seq_length_ratio\", 512),\n cache_ids=cfg.get(\"cache_ids\", False),\n cache_data_per_node=cfg.get(\"cache_data_per_node\", False),\n use_cache=cfg.get(\"use_cache\", False),\n 
reverse_lang_direction=cfg.get(\"reverse_lang_direction\", False),\n prepend_id=self.multilingual_ids[idx],\n )\n dataset.batchify(self.encoder_tokenizer, self.decoder_tokenizer)\n datasets.append(dataset)\n\n if self.multilingual:\n dataset = ConcatDataset(\n datasets=datasets,\n shuffle=cfg.get('shuffle'),\n sampling_technique=cfg.get('concat_sampling_technique'),\n sampling_temperature=cfg.get('concat_sampling_temperature'),\n sampling_probabilities=cfg.get('concat_sampling_probabilities'),\n global_rank=self.global_rank,\n world_size=self.world_size,\n )\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=1,\n num_workers=cfg.get(\"num_workers\", 2),\n pin_memory=cfg.get(\"pin_memory\", False),\n drop_last=cfg.get(\"drop_last\", False),\n )\n else:\n dataset = datasets[0]\n\n if cfg.shuffle:\n sampler = pt_data.RandomSampler(dataset)\n else:\n sampler = pt_data.SequentialSampler(dataset)\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=1,\n sampler=sampler,\n num_workers=cfg.get(\"num_workers\", 2),\n pin_memory=cfg.get(\"pin_memory\", False),\n drop_last=cfg.get(\"drop_last\", False),\n )\n\n def replace_beam_with_sampling(self, topk=500):\n self.beam_search = TopKSequenceGenerator(\n embedding=self.decoder.embedding,\n decoder=self.decoder.decoder,\n log_softmax=self.log_softmax,\n max_sequence_length=self.beam_search.max_seq_length,\n beam_size=topk,\n bos=self.decoder_tokenizer.bos_id,\n pad=self.decoder_tokenizer.pad_id,\n eos=self.decoder_tokenizer.eos_id,\n )\n\n def _setup_eval_dataloader_from_config(self, cfg: DictConfig):\n src_file_name = cfg.get('src_file_name')\n tgt_file_name = cfg.get('tgt_file_name')\n\n if src_file_name is None or tgt_file_name is None:\n raise ValueError(\n 'Validation dataloader needs both cfg.src_file_name and cfg.tgt_file_name to not be None.'\n )\n else:\n # convert src_file_name and tgt_file_name to list of strings\n if isinstance(src_file_name, str):\n src_file_list = [src_file_name]\n elif isinstance(src_file_name, ListConfig):\n src_file_list = src_file_name\n else:\n raise ValueError(\"cfg.src_file_name must be string or list of strings\")\n if isinstance(tgt_file_name, str):\n tgt_file_list = [tgt_file_name]\n elif isinstance(tgt_file_name, ListConfig):\n tgt_file_list = tgt_file_name\n else:\n raise ValueError(\"cfg.tgt_file_name must be string or list of strings\")\n if len(src_file_list) != len(tgt_file_list):\n raise ValueError('The same number of filepaths must be passed in for source and target validation.')\n\n dataloaders = []\n prepend_idx = 0\n for idx, src_file in enumerate(src_file_list):\n if self.multilingual:\n prepend_idx = idx\n dataset = TranslationDataset(\n dataset_src=str(Path(src_file).expanduser()),\n dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),\n tokens_in_batch=cfg.tokens_in_batch,\n clean=cfg.get(\"clean\", False),\n max_seq_length=cfg.get(\"max_seq_length\", 512),\n min_seq_length=cfg.get(\"min_seq_length\", 1),\n max_seq_length_diff=cfg.get(\"max_seq_length_diff\", 512),\n max_seq_length_ratio=cfg.get(\"max_seq_length_ratio\", 512),\n cache_ids=cfg.get(\"cache_ids\", False),\n cache_data_per_node=cfg.get(\"cache_data_per_node\", False),\n use_cache=cfg.get(\"use_cache\", False),\n reverse_lang_direction=cfg.get(\"reverse_lang_direction\", False),\n prepend_id=self.multilingual_ids[prepend_idx],\n )\n dataset.batchify(self.encoder_tokenizer, self.decoder_tokenizer)\n\n if cfg.shuffle:\n sampler = pt_data.RandomSampler(dataset)\n else:\n sampler = 
pt_data.SequentialSampler(dataset)\n\n dataloader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=1,\n sampler=sampler,\n num_workers=cfg.get(\"num_workers\", 2),\n pin_memory=cfg.get(\"pin_memory\", False),\n drop_last=cfg.get(\"drop_last\", False),\n )\n dataloaders.append(dataloader)\n\n return dataloaders\n\n def setup_pre_and_post_processing_utils(self, source_lang, target_lang):\n \"\"\"\n Creates source and target processor objects for input and output pre/post-processing.\n \"\"\"\n self.source_processor, self.target_processor = None, None\n if (source_lang == 'en' and target_lang == 'ja') or (source_lang == 'ja' and target_lang == 'en'):\n self.source_processor = EnJaProcessor(source_lang)\n self.target_processor = EnJaProcessor(target_lang)\n else:\n if source_lang == 'zh':\n self.source_processor = ChineseProcessor()\n if target_lang == 'zh':\n self.target_processor = ChineseProcessor()\n if source_lang is not None and source_lang not in ['ja', 'zh']:\n self.source_processor = MosesProcessor(source_lang)\n if target_lang is not None and target_lang not in ['ja', 'zh']:\n self.target_processor = MosesProcessor(target_lang)\n\n return self.source_processor, self.target_processor\n\n @torch.no_grad()\n def batch_translate(\n self, src: torch.LongTensor, src_mask: torch.LongTensor,\n ):\n \"\"\"\t\n Translates a minibatch of inputs from source language to target language.\t\n Args:\t\n src: minibatch of inputs in the src language (batch x seq_len)\t\n src_mask: mask tensor indicating elements to be ignored (batch x seq_len)\t\n Returns:\t\n translations: a list strings containing detokenized translations\t\n inputs: a list of string containing detokenized inputs\t\n \"\"\"\n mode = self.training\n try:\n self.eval()\n src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)\n beam_results = self.beam_search(encoder_hidden_states=src_hiddens, encoder_input_mask=src_mask)\n beam_results = self.filter_predicted_ids(beam_results)\n\n translations = [self.decoder_tokenizer.ids_to_text(tr) for tr in beam_results.cpu().numpy()]\n inputs = [self.encoder_tokenizer.ids_to_text(inp) for inp in src.cpu().numpy()]\n if self.target_processor is not None:\n translations = [\n self.target_processor.detokenize(translation.split(' ')) for translation in translations\n ]\n\n if self.source_processor is not None:\n inputs = [self.source_processor.detokenize(item.split(' ')) for item in inputs]\n finally:\n self.train(mode=mode)\n return inputs, translations\n\n # TODO: We should drop source/target_lang arguments in favor of using self.src/tgt_language\n @torch.no_grad()\n def translate(self, text: List[str], source_lang: str = None, target_lang: str = None) -> List[str]:\n \"\"\"\n Translates list of sentences from source language to target language.\n Should be regular text, this method performs its own tokenization/de-tokenization\n Args:\n text: list of strings to translate\n source_lang: if not None, corresponding MosesTokenizer and MosesPunctNormalizer will be run\n target_lang: if not None, corresponding MosesDecokenizer will be run\n Returns:\n list of translated strings\n \"\"\"\n # __TODO__: This will reset both source and target processors even if you want to reset just one.\n if source_lang is not None or target_lang is not None:\n self.setup_pre_and_post_processing_utils(source_lang, target_lang)\n\n mode = self.training\n prepend_ids = []\n if self.multilingual:\n if source_lang is None or target_lang is None:\n raise ValueError(\"Expect source_lang and target_lang 
to infer for multilingual model.\")\n src_symbol = self.encoder_tokenizer.token_to_id('<' + source_lang + '>')\n tgt_symbol = self.encoder_tokenizer.token_to_id('<' + target_lang + '>')\n prepend_ids = [src_symbol if src_symbol in self.multilingual_ids else tgt_symbol]\n try:\n self.eval()\n inputs = []\n for txt in text:\n if self.source_processor is not None:\n txt = self.source_processor.normalize(txt)\n txt = self.source_processor.tokenize(txt)\n ids = self.encoder_tokenizer.text_to_ids(txt)\n ids = prepend_ids + [self.encoder_tokenizer.bos_id] + ids + [self.encoder_tokenizer.eos_id]\n inputs.append(ids)\n max_len = max(len(txt) for txt in inputs)\n src_ids_ = np.ones((len(inputs), max_len)) * self.encoder_tokenizer.pad_id\n for i, txt in enumerate(inputs):\n src_ids_[i][: len(txt)] = txt\n\n src_mask = torch.FloatTensor((src_ids_ != self.encoder_tokenizer.pad_id)).to(self.device)\n src = torch.LongTensor(src_ids_).to(self.device)\n _, translations = self.batch_translate(src, src_mask)\n finally:\n self.train(mode=mode)\n return translations\n\n @classmethod\n def list_available_models(cls) -> Optional[Dict[str, str]]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_en_de_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_de_transformer12x2/versions/1.0.0rc1/files/nmt_en_de_transformer12x2.nemo\",\n description=\"En->De translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_de_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_de_en_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_de_en_transformer12x2/versions/1.0.0rc1/files/nmt_de_en_transformer12x2.nemo\",\n description=\"De->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_de_en_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_en_es_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_es_transformer12x2/versions/1.0.0rc1/files/nmt_en_es_transformer12x2.nemo\",\n description=\"En->Es translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_es_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_es_en_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_es_en_transformer12x2/versions/1.0.0rc1/files/nmt_es_en_transformer12x2.nemo\",\n description=\"Es->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_es_en_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_en_fr_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_fr_transformer12x2/versions/1.0.0rc1/files/nmt_en_fr_transformer12x2.nemo\",\n description=\"En->Fr translation model. 
See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_fr_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_fr_en_transformer12x2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_fr_en_transformer12x2/versions/1.0.0rc1/files/nmt_fr_en_transformer12x2.nemo\",\n description=\"Fr->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_fr_en_transformer12x2\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_en_ru_transformer6x6\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_ru_transformer6x6/versions/1.0.0rc1/files/nmt_en_ru_transformer6x6.nemo\",\n description=\"En->Ru translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_ru_transformer6x6\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_ru_en_transformer6x6\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_ru_en_transformer6x6/versions/1.0.0rc1/files/nmt_ru_en_transformer6x6.nemo\",\n description=\"Ru->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_ru_en_transformer6x6\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_zh_en_transformer6x6\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_zh_en_transformer6x6/versions/1.0.0rc1/files/nmt_zh_en_transformer6x6.nemo\",\n description=\"Zh->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_zh_en_transformer6x6\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"nmt_en_zh_transformer6x6\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_zh_transformer6x6/versions/1.0.0rc1/files/nmt_en_zh_transformer6x6.nemo\",\n description=\"En->Zh translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_zh_transformer6x6\",\n )\n result.append(model)\n\n return result\n"
] |
[
[
"torch.LongTensor",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.FloatTensor",
"torch.no_grad",
"numpy.mean",
"numpy.not_equal"
]
] |
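
A hedged usage sketch for the `MTEncDecModel` above: the checkpoint names come from its `list_available_models()`, and the loader is assumed to be NeMo's standard `from_pretrained` (which downloads the `.nemo` file from NGC); the input sentence is a placeholder.

```python
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel

# Checkpoint names as returned by list_available_models() above.
for info in MTEncDecModel.list_available_models():
    print(info.pretrained_model_name)

# from_pretrained is assumed here as the usual NeMo ModelPT loader.
model = MTEncDecModel.from_pretrained("nmt_en_de_transformer12x2")
print(model.translate(["Machine translation is fun."]))
```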
SHMingLiu/Deep-Supervised-Learning-in-Metal-Forming
|
[
"42cb3f29bf64d6e9225541943f117099c37c88f6"
] |
[
"Theory-guided_Deep_Learning/utils.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 1 14:37:12 2020\r\n\r\n@author: sl7516\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport json\r\nimport os\r\n\r\nRESULTS_DIR = 'results/'\r\n\r\n\r\ndef print_json(result):\r\n # print result in a jsonable structure\r\n print (json.dumps(\r\n result,\r\n default=None, sort_keys=False, \r\n indent=4, separators=(',',': ')\r\n ))\r\n\r\ndef save_json_result(model_name, result):\r\n # save the result in json format\r\n result_name = '{}.txt.json'.format(model_name)\r\n if not os.path.exists(RESULTS_DIR):\r\n os.makedirs(RESULTS_DIR)\r\n with open(os.path.join(RESULTS_DIR, result_name), 'w') as f:\r\n json.dump(\r\n result, f,\r\n default=None, sort_keys=False, \r\n indent=4, separators=(',',': ')\r\n )\r\n \r\ndef load_json_result(best_result_name):\r\n result_path = os.path.join(RESULTS_DIR, best_result_name)\r\n with open(result_path, 'r') as f:\r\n return json.load(f)\r\n \r\ndef load_best_hyperspace():\r\n results = [\r\n f for f in list(sorted(os.listdir(RESULTS_DIR))) if 'json' in f\r\n ]\r\n if len(results) ==0:\r\n return None\r\n \r\n best_result_name = results[-1]\r\n return load_json_result(best_result_name)['hyper_space']\r\n \r\n \r\ndef standardization(original_data):\r\n mean = np.mean(original_data)\r\n std = np.std(original_data)\r\n data_processed = []\r\n for i in range(len(original_data)):\r\n data_processed.append((original_data[i]-mean)/std)\r\n \r\n return data_processed, mean, std\r\n\r\ndef un_standardization(data_processed, mean, std):\r\n original_data = []\r\n for i in range(len(data_processed)):\r\n original_data.append(data_processed[i]*std + mean)\r\n \r\n return original_data\r\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
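
The `standardization`/`un_standardization` helpers above loop element by element over Python lists; on NumPy arrays the same transform is a one-liner. A vectorised sketch (not part of the repo), using the same population standard deviation as `np.std`:

```python
import numpy as np

def standardize(values):
    """Return (z-scores, mean, std) for a 1-D sequence, vectorised."""
    values = np.asarray(values, dtype=float)
    mean, std = values.mean(), values.std()
    return (values - mean) / std, mean, std

def unstandardize(z_scores, mean, std):
    """Invert standardize()."""
    return np.asarray(z_scores) * std + mean

data = [1.0, 2.0, 3.0, 4.0]
z, mean, std = standardize(data)
print(np.allclose(unstandardize(z, mean, std), data))  # True
```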
alaniwi/cf-python
|
[
"16525ea97df9a3f5e8146b1386e1732fb8f7c0e5"
] |
[
"cf/domain.py"
] |
[
"import logging\nfrom functools import reduce\nfrom operator import mul as operator_mul\n\nimport cfdm\nimport numpy as np\n\nfrom . import mixin\nfrom .constructs import Constructs\nfrom .data import Data\nfrom .decorators import _inplace_enabled, _inplace_enabled_define_and_cleanup\nfrom .functions import _DEPRECATION_ERROR_ARG, parse_indices\n\nlogger = logging.getLogger(__name__)\n\n_empty_set = set()\n\n\nclass Domain(mixin.FieldDomain, mixin.Properties, cfdm.Domain):\n \"\"\"A domain construct of the CF data model.\n\n The domain represents a set of discrete \"locations\" in what\n generally would be a multi-dimensional space, either in the real\n world or in a model's simulated world. The data array elements of\n a field construct correspond to individual location of a domain.\n\n The domain construct is defined collectively by the following\n constructs of the CF data model: domain axis, dimension\n coordinate, auxiliary coordinate, cell measure, coordinate\n reference, and domain ancillary constructs; as well as properties\n to describe the domain.\n\n **NetCDF interface**\n\n {{netCDF variable}}\n\n {{netCDF global attributes}}\n\n {{netCDF group attributes}}\n\n {{netCDF geometry group}}\n\n Some components exist within multiple constructs, but when written\n to a netCDF dataset the netCDF names associated with such\n components will be arbitrarily taken from one of them. The netCDF\n variable, dimension and sample dimension names and group\n structures for such components may be set or removed consistently\n across all such components with the `nc_del_component_variable`,\n `nc_set_component_variable`, `nc_set_component_variable_groups`,\n `nc_clear_component_variable_groups`,\n `nc_del_component_dimension`, `nc_set_component_dimension`,\n `nc_set_component_dimension_groups`,\n `nc_clear_component_dimension_groups`,\n `nc_del_component_sample_dimension`,\n `nc_set_component_sample_dimension`,\n `nc_set_component_sample_dimension_groups`,\n `nc_clear_component_sample_dimension_groups` methods.\n\n A domain construct of the CF data model.\n\n The domain represents a set of discrete \"locations\" in what\n generally would be a multi-dimensional space, either in the real\n world or in a model's simulated world. 
The data array elements of\n a field construct correspond to individual location of a domain.\n\n The domain construct is defined collectively by the following\n constructs of the CF data model: domain axis, dimension\n coordinate, auxiliary coordinate, cell measure, coordinate\n reference, and domain ancillary constructs; as well as properties\n to describe the domain.\n\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Creates a new Domain instance.\"\"\"\n instance = super().__new__(cls)\n instance._Data = Data\n instance._Constructs = Constructs\n return instance\n\n def __repr__(self):\n \"\"\"Called by the `repr` built-in function.\n\n x.__repr__() <==> repr(x)\n\n \"\"\"\n return super().__repr__().replace(\"<\", \"<CF \", 1)\n\n @property\n def _cyclic(self):\n \"\"\"Storage for axis cyclicity.\n\n Do not change the value in-place.\n\n \"\"\"\n return self._custom.get(\"_cyclic\", _empty_set)\n\n @_cyclic.setter\n def _cyclic(self, value):\n \"\"\"value must be a set.\n\n Do not change the value in-place.\n\n \"\"\"\n self._custom[\"_cyclic\"] = value\n\n @_cyclic.deleter\n def _cyclic(self):\n self._custom[\"_cyclic\"] = _empty_set\n\n @property\n def size(self):\n \"\"\"The number of locations in the domain.\n\n If there are no domain axis constructs, or any domain axis\n construct has a size of 0, then the size is 0.\n\n \"\"\"\n domain_axes = self.domain_axes(todict=True)\n if not domain_axes:\n return 0\n\n return reduce(\n operator_mul,\n [domain_axis.get_size(0) for domain_axis in domain_axes.values()],\n 1,\n )\n\n def close(self):\n \"\"\"Close all files referenced by the domain construct.\n\n Note that a closed file will be automatically reopened if its\n contents are subsequently required.\n\n :Returns:\n\n `None`\n\n **Examples:**\n\n >>> d.close()\n\n \"\"\"\n # TODODASK - is this still needed?\n\n self.constructs.close()\n\n # def cyclic(\n # self, *identity, iscyclic=True, period=None, config={}, **filter_kwargs\n # ):\n # \"\"\"Set the cyclicity of an axis.\n #\n # .. versionadded:: 3.11.0\n #\n # .. seealso:: `autocyclic`, `domain_axis`, `iscyclic`\n #\n # :Parameters:\n #\n # identity, filter_kwargs: optional\n # Select the unique domain axis construct returned by\n # ``f.domain_axis(*identity, **filter_kwargs)``. See\n # `domain_axis` for details.\n #\n # iscyclic: `bool`, optional\n # If False then the axis is set to be non-cyclic. By\n # default the selected axis is set to be cyclic.\n #\n # period: optional\n # The period for a dimension coordinate construct which\n # spans the selected axis. May be any numeric scalar\n # object that can be converted to a `Data` object (which\n # includes numpy array and `Data` objects). The absolute\n # value of *period* is used. If *period* has units then\n # they must be compatible with those of the dimension\n # coordinates, otherwise it is assumed to have the same\n # units as the dimension coordinates.\n #\n # config: `dict`\n # Additional parameters for optimizing the\n # operation. 
See the code for details.\n #\n # :Returns:\n #\n # `set`\n # The construct keys of the domain axes which were cyclic\n # prior to the new setting, or the current cyclic domain\n # axes if no axis was specified.\n #\n # **Examples:**\n #\n # >>> f.cyclic()\n # set()\n # >>> f.cyclic('X', period=360)\n # set()\n # >>> f.cyclic()\n # {'domainaxis2'}\n # >>> f.cyclic('X', iscyclic=False)\n # {'domainaxis2'}\n # >>> f.cyclic()\n # set()\n #\n # \"\"\"\n # cyclic = self._cyclic\n # old = cyclic.copy()\n #\n # if identity is None:\n # return old\n #\n # axis = self.domain_axis(identity, key=True)\n #\n # if iscyclic:\n # dim = self.dimension_coordinate(axis, default=None)\n # if dim is not None:\n # if period is not None:\n # dim.period(period)\n # elif dim.period() is None:\n # raise ValueError(\n # \"A cyclic dimension coordinate must have a period\"\n # )\n #\n # # Never change _cyclic in-place\n # self._cyclic = cyclic.union((axis,))\n #\n # return old\n #\n # def domain_axis(self, identity=None, key=False, item=False,\n # default=ValueError()):\n # \"\"\"Return a domain axis construct, or its key.\n #\n # .. versionadded:: 3.11.0\n #\n # .. seealso:: `construct`, `auxiliary_coordinate`, `cell_measure`,\n # `cell_method`, `coordinate`, `coordinate_reference`,\n # `dimension_coordinate`, `domain_ancillary`,\n # `domain_axes`, `field_ancillary`\n #\n # :Parameters:\n #\n # identity: optional\n # Select the domain axis construct.\n #\n # {{domain axis selection}}\n #\n # If *identity is `None` (the default) then the unique\n # domain axis construct is selected when there is only one\n # of them.\n #\n # *Parameter example:*\n # ``identity='time'``\n #\n # *Parameter example:*\n # ``identity='domainaxis2'``\n #\n # *Parameter example:*\n # ``identity='ncdim%y'``\n #\n # key: `bool`, optional\n # If True then return the selected construct key. By\n # default the construct itself is returned.\n #\n # default: optional\n # Return the value of the *default* parameter if a construct\n # can not be found.\n #\n # {{default Exception}}\n #\n # :Returns:\n #\n # `DomainAxis` or `str`\n # The selected domain axis construct, or its key.\n #\n # **Examples:**\n #\n # \"\"\"\n # c = self.domain_axes(identity)\n #\n # n = len(c)\n # if n == 1:\n # k, construct = c.popitem()\n # if key:\n # return k\n #\n # if item:\n # return k, construct\n #\n # return construct\n # elif n > 1:\n # if default is None:\n # return default\n #\n # return self._default(\n # default,\n # f\"{self.__class__.__name__}.{_method}() can't return {n} \"\n # \"constructs\",\n # )\n #\n # # identity is not a unique domain axis construct identity\n # da_key = self.domain_axis_key(identity, default=None)\n # if da_key is None:\n # if default is None:\n # return default\n #\n # return self._default(\n # default,\n # message=f\"No domain axis found from identity {identity!r}\",\n # )\n #\n # if key:\n # return da_key\n #\n # return self.constructs[da_key]\n\n @_inplace_enabled(default=False)\n def flip(self, axes=None, inplace=False):\n \"\"\"Flip (reverse the direction of) domain axes.\n\n .. seealso:: `domain_axis`, `transpose`\n\n :Parameters:\n\n axes: (sequence of) `str` , optional\n Select the domain axes to flip.\n\n A domain axis is identified by that which would be\n selected by passing a given axis description to a call of\n the `domain_axis` method. 
For example, a value of ``'X'``\n would select the domain axis construct returned by\n ``f.domain_axis('X')``.\n\n If no axes are provided then all axes are flipped.\n\n {{inplace: `bool`, optional}}\n\n :Returns:\n\n `Domain` or `None`\n The domain with flipped axes, or `None` if the operation\n was in-place.\n\n **Examples:**\n\n >>> d = cf.example_field(0).domain\n >>> print(d)\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(8) = [22.5, ..., 337.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> print(d.flip('X'))\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(8) = [337.5, ..., 22.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> print(d.flip(['T', 'Y']))\n Dimension coords: latitude(5) = [75.0, ..., -75.0] degrees_north\n : longitude(8) = [22.5, ..., 337.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> print(d.flip())\n Dimension coords: latitude(5) = [75.0, ..., -75.0] degrees_north\n : longitude(8) = [337.5, ..., 22.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n \"\"\"\n d = _inplace_enabled_define_and_cleanup(self)\n\n if axes is None:\n # Flip all the axes\n axes = self.domain_axes(todict=True)\n else:\n axes = self._parse_axes(axes)\n\n axes = set(axes)\n\n # Flip constructs with data\n d.constructs._flip(axes)\n\n return d\n\n def get_data(self, default=ValueError(), _units=None, _fill_value=True):\n \"\"\"Return a default value when data is requested.\n\n A `Domain` instance can never have data, so a default value\n must be returned if data is requested. This is useful for\n cases when it is not known in advance if a `Field` or `Domain`\n instance is in use.\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `has_data`\n\n :Parameters:\n\n default: optional\n Return the value of the *default* parameter.\n\n {{default Exception}}\n\n _units: optional\n Ignored.\n\n _fill_value: optional\n Ignored.\n\n :Returns:\n\n The value of the *default* parameter, if an exception\n has not been raised.\n\n **Examples:**\n\n >>> d = cf.example_domain(0)\n >>> print(d.get_data(None))\n None\n >>> d.get_data()\n Traceback (most recent call last):\n ...\n ValueError: Domain has no data\n\n \"\"\"\n if default is None:\n return\n\n return self._default(\n default, message=f\"{self.__class__.__name__} has no data\"\n )\n\n def get_data_axes(self, identity, default=ValueError()):\n \"\"\"Return the keys of the domain axis constructs spanned by the\n data of a metadata construct.\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `del_data_axes`, `has_data_axes`, `set_data_axes`\n\n :Parameters:\n\n identity: optional\n Select the construct by one of\n\n * A metadata construct identity.\n\n {{construct selection identity}}\n\n * The key of a metadata construct\n\n * `None`. 
This is the default, which selects the metadata\n construct when there is only one of them.\n\n *Parameter example:*\n ``identity='latitude'``\n\n *Parameter example:*\n ``identity='T'\n\n *Parameter example:*\n ``identity='long_name=Cell Area'``\n\n *Parameter example:*\n ``identity='cellmeasure1'``\n\n *Parameter example:*\n ``identity='measure:area'``\n\n *Parameter example:*\n ``identity=cf.eq('time')'``\n\n *Parameter example:*\n ``identity=re.compile('^lat')``\n\n default: optional\n Return the value of the *default* parameter if the data\n axes have not been set.\n\n {{default Exception}}\n\n :Returns:\n\n `tuple`\n The keys of the domain axis constructs spanned by the data.\n\n **Examples:**\n\n >>> d = cf.example_field(7).domain\n >>> print(d)\n Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian\n : air_pressure(1) = [850.0] hPa\n : grid_latitude(4) = [0.44, ..., -0.88] degrees\n : grid_longitude(5) = [-1.18, ..., 0.58] degrees\n Auxiliary coords: latitude(grid_latitude(4), grid_longitude(5)) = [[52.4243, ..., 51.1163]] degrees_north\n : longitude(grid_latitude(4), grid_longitude(5)) = [[8.0648, ..., 10.9238]] degrees_east\n Coord references: grid_mapping_name:rotated_latitude_longitude\n >>> print(d.constructs)\n Constructs:\n {'auxiliarycoordinate0': <CF AuxiliaryCoordinate: latitude(4, 5) degrees_north>,\n 'auxiliarycoordinate1': <CF AuxiliaryCoordinate: longitude(4, 5) degrees_east>,\n 'coordinatereference0': <CF CoordinateReference: grid_mapping_name:rotated_latitude_longitude>,\n 'dimensioncoordinate0': <CF DimensionCoordinate: time(3) days since 1979-1-1 gregorian>,\n 'dimensioncoordinate1': <CF DimensionCoordinate: air_pressure(1) hPa>,\n 'dimensioncoordinate2': <CF DimensionCoordinate: grid_latitude(4) degrees>,\n 'dimensioncoordinate3': <CF DimensionCoordinate: grid_longitude(5) degrees>,\n 'domainaxis0': <CF DomainAxis: size(3)>,\n 'domainaxis1': <CF DomainAxis: size(1)>,\n 'domainaxis2': <CF DomainAxis: size(4)>,\n 'domainaxis3': <CF DomainAxis: size(5)>}\n >>> d.get_data_axes('grid_latitude')\n ('domainaxis2',)\n >>> d.get_data_axes('latitude')\n ('domainaxis2', 'domainaxis3')\n\n \"\"\"\n key = self.construct(identity, key=True, default=None)\n if key is None:\n return self.construct_key(identity, default=default)\n\n return super().get_data_axes(key=key, default=default)\n\n def identity(self, default=\"\", strict=False, relaxed=False, nc_only=False):\n \"\"\"Return the canonical identity.\n\n By default the identity is the first found of the following:\n\n * The \"id\" attribute, preceded by ``'id%'``.\n * The \"cf_role\" property, preceded by ``'cf_role='``.\n * The \"long_name\" property, preceded by ``'long_name='``.\n * The netCDF variable name, preceded by ``'ncvar%'``.\n * The value of the *default* parameter.\n\n .. versionadded:: 3.11.0\n\n .. 
seealso:: `id`, `identities`\n\n :Parameters:\n\n default: optional\n If no identity can be found then return the value of the\n default parameter.\n\n strict: `bool`, optional\n If True then the identity is the first found of only the\n \"standard_name\" property or the \"id\" attribute.\n\n relaxed: `bool`, optional\n If True then the identity is the first found of only the\n \"standard_name\" property, the \"id\" attribute, the\n \"long_name\" property or the netCDF variable name.\n\n nc_only: `bool`, optional\n If True then only take the identity from the netCDF\n variable name.\n\n :Returns:\n\n The identity.\n\n **Examples:**\n\n >>> f.properties()\n {'foo': 'bar',\n 'long_name': 'Air Temperature',\n 'standard_name': 'air_temperature'}\n >>> f.nc_get_variable()\n 'tas'\n >>> f.identity()\n 'air_temperature'\n >>> f.del_property('standard_name')\n 'air_temperature'\n >>> f.identity(default='no identity')\n 'air_temperature'\n >>> f.identity()\n 'long_name=Air Temperature'\n >>> f.del_property('long_name')\n >>> f.identity()\n 'ncvar%tas'\n >>> f.nc_del_variable()\n 'tas'\n >>> f.identity()\n 'ncvar%tas'\n >>> f.identity()\n ''\n >>> f.identity(default='no identity')\n 'no identity'\n\n \"\"\"\n if nc_only:\n if strict:\n raise ValueError(\n \"'strict' and 'nc_only' parameters cannot both be True\"\n )\n\n if relaxed:\n raise ValueError(\n \"'relaxed' and 'nc_only' parameters cannot both be True\"\n )\n\n n = self.nc_get_variable(None)\n if n is not None:\n return f\"ncvar%{n}\"\n\n return default\n\n n = getattr(self, \"id\", None)\n if n is not None:\n return f\"id%{n}\"\n\n if relaxed:\n n = self.get_property(\"long_name\", None)\n if n is not None:\n return f\"long_name={n}\"\n\n n = self.nc_get_variable(None)\n if n is not None:\n return f\"ncvar%{n}\"\n\n return default\n\n if strict:\n return default\n\n for prop in (\"cf_role\", \"long_name\"):\n n = self.get_property(prop, None)\n if n is not None:\n return f\"{prop}={n}\"\n\n n = self.nc_get_variable(None)\n if n is not None:\n return f\"ncvar%{n}\"\n\n return default\n\n def identities(self):\n \"\"\"Return all possible identities.\n\n The identities comprise:\n\n * The \"id\" attribute, preceded by ``'id%'``.\n * The ``cf_role`` property, preceeded by ``'cf_role='``.\n * The ``long_name`` property, preceeded by ``'long_name='``.\n * All other properties, preceeded by the property name and a\n equals e.g. ``'foo=bar'``.\n * The netCDF variable name, preceeded by ``'ncvar%'``.\n\n .. versionadded:: (cfdm) 1.9.0.0\n\n .. seealso:: `identity`\n\n :Returns:\n\n `list`\n The identities.\n\n **Examples:**\n\n >>> d = {{package}}.Domain()\n >>> d.set_properties({'foo': 'bar',\n ... 'long_name': 'Domain for model'})\n >>> d.nc_set_variable('dom1')\n >>> d.identities()\n ['long_name=Domain for model', 'foo=bar', 'ncvar%dom1']\n\n \"\"\"\n out = super().identities()\n\n i = getattr(self, \"id\", None)\n if i is not None:\n # Insert id attribute\n i = f\"id%{i}\"\n if not out:\n out = [i]\n else:\n out.insert(0, i)\n\n return out\n\n def indices(self, *mode, **kwargs):\n \"\"\"Create indices that define a subspace of the domain\n construct.\n\n The indices returned by this method be used to create the subspace\n by passing them to the `subspace` method of the original domain\n construct.\n\n The subspace is defined by identifying indices based on the\n metadata constructs.\n\n Metadata constructs are selected conditions are specified on their\n data. 
Indices for subspacing are then automatically inferred from\n where the conditions are met.\n\n Metadata constructs and the conditions on their data are defined\n by keyword parameters.\n\n * Any domain axes that have not been identified remain unchanged.\n\n * Multiple domain axes may be subspaced simultaneously, and it\n doesn't matter which order they are specified in.\n\n * Explicit indices may also be assigned to a domain axis\n identified by a metadata construct, with either a Python `slice`\n object, or a sequence of integers or booleans.\n\n * For a dimension that is cyclic, a subspace defined by a slice or\n by a `Query` instance is assumed to \"wrap\" around the edges of\n the data.\n\n * Conditions may also be applied to multi-dimensional metadata\n constructs. The \"compress\" mode is still the default mode (see\n the positional arguments), but because the indices may not be\n acting along orthogonal dimensions, some missing data may still\n need to be inserted into the field construct's data.\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `subspace`, `where`, `__getitem__`, `__setitem__`\n\n :Parameters:\n\n mode: `str`, *optional*\n There are two modes of operation, each of which provides\n indices for a different type of subspace:\n\n ============== ==========================================\n *mode* Description\n ============== ==========================================\n ``'compress'`` Return indices that identify only the\n requested locations.\n\n This is the default mode.\n\n Note that if a multi-dimensional metadata\n construct is being used to define the\n indices then some unrequested locations\n may also be selected.\n\n ``'envelope'`` The returned subspace is the smallest that\n contains all of the requested locations.\n ============== ==========================================\n\n kwargs: *optional*\n A keyword name is an identity of a metadata construct, and\n the keyword value provides a condition for inferring\n indices that apply to the dimension (or dimensions)\n spanned by the metadata construct's data. 
Indices are\n created that select every location for which the metadata\n construct's data satisfies the condition.\n\n :Returns:\n\n `dict`\n A dictionary of indices, keyed by the domain axis\n construct identifiers to which they apply.\n\n **Examples:**\n\n >>> d = cf.example_field(0).domain\n >>> print(d)\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(8) = [22.5, ..., 337.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n >>> indices = d.indices(X=112.5)\n >>> indices\n {'domainaxis0': slice(0, 5, 1),\n 'domainaxis1': slice(2, 3, 1),\n 'domainaxis2': slice(0, 1, 1)}\n >>> print(d.subspace(**indices))\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(1) = [112.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> indices = d.indices(X=112.5, Y=cf.wi(-60, 30))\n >>> indices\n {'domainaxis0': slice(1, 3, 1),\n 'domainaxis1': slice(2, 3, 1),\n 'domainaxis2': slice(0, 1, 1)}\n >>> print(d.subspace(**indices))\n Dimension coords: latitude(2) = [-45.0, 0.0] degrees_north\n : longitude(1) = [112.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> d.indices(X=[-1, 0], Y=slice(1, -1))\n {'domainaxis0': slice(1, 4, 1),\n 'domainaxis1': slice(7, None, -7),\n 'domainaxis2': slice(0, 1, 1)}\n >>> print(print(d.subspace(**indices)))\n Dimension coords: latitude(3) = [-45.0, 0.0, 45.0] degrees_north\n : longitude(2) = [337.5, 22.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n \"\"\"\n if len(mode) > 1:\n raise ValueError(\n \"Can't provide more than one positional argument. \"\n f\"Got: {', '.join(repr(x) for x in mode)}\"\n )\n\n if not mode or \"compress\" in mode:\n mode = \"compress\"\n elif \"envelope\" in mode:\n mode = \"envelope\"\n else:\n raise ValueError(f\"Invalid value for 'mode' argument: {mode[0]!r}\")\n\n # ------------------------------------------------------------\n # Get the indices for every domain axis in the domain, without\n # any auxiliary masks.\n # ------------------------------------------------------------\n domain_indices = self._indices(mode, None, False, **kwargs)\n\n # ------------------------------------------------------------\n # Return the indices\n # ------------------------------------------------------------\n return domain_indices[\"indices\"]\n\n def match_by_construct(self, *identities, OR=False, **conditions):\n \"\"\"Whether or not there are particular metadata constructs.\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `match`, `match_by_property`, `match_by_rank`,\n `match_by_identity`, `match_by_ncvar`\n\n :Parameters:\n\n identities: optional\n Identify the metadata constructs by one or more of\n\n * A metadata construct identity.\n\n {{construct selection identity}}\n\n * The key of a metadata construct\n\n If a cell method construct identity is given (such as\n ``'method:mean'``) then it will only be compared with the\n most recently applied cell method operation.\n\n Alternatively, one or more cell method constucts may be\n identified in a single string with a CF-netCDF cell\n methods-like syntax for describing both the collapse\n dimensions, the collapse method, and any cell method\n construct qualifiers. If N cell methods are described in\n this way then they will collectively identify the N most\n recently applied cell method operations. 
For example,\n ``'T: maximum within years T: mean over years'`` will be\n compared with the most two most recently applied cell\n method operations.\n\n *Parameter example:*\n ``identity='latitude'``\n\n *Parameter example:*\n ``'T'\n\n *Parameter example:*\n ``'latitude'``\n\n *Parameter example:*\n ``'long_name=Cell Area'``\n\n *Parameter example:*\n ``'cellmeasure1'``\n\n *Parameter example:*\n ``'measure:area'``\n\n *Parameter example:*\n ``cf.eq('time')'``\n\n *Parameter example:*\n ``re.compile('^lat')``\n\n *Parameter example:*\n ``'domainancillary2', 'longitude'``\n\n *Parameter example:*\n ``'area: mean T: maximum'``\n\n *Parameter example:*\n ``'grid_latitude', 'area: mean T: maximum'``\n\n conditions: optional\n Identify the metadata constructs that have any of the\n given identities or construct keys, and whose data satisfy\n conditions.\n\n A construct identity or construct key (as defined by the\n *identities* parameter) is given as a keyword name and a\n condition on its data is given as the keyword value.\n\n The condition is satisfied if any of its data values\n equals the value provided.\n\n *Parameter example:*\n ``longitude=180.0``\n\n *Parameter example:*\n ``time=cf.dt('1959-12-16')``\n\n *Parameter example:*\n ``latitude=cf.ge(0)``\n\n *Parameter example:*\n ``latitude=cf.ge(0), air_pressure=500``\n\n *Parameter example:*\n ``**{'latitude': cf.ge(0), 'long_name=soil_level': 4}``\n\n OR: `bool`, optional\n If True then return `True` if at least one metadata\n construct matches at least one of the criteria given by\n the *identities* or *conditions* arguments. By default\n `True` is only returned if the field constructs matches\n each of the given criteria.\n\n :Returns:\n\n `bool`\n Whether or not the domain construct contains the specfied\n metadata constructs.\n\n **Examples:**\n\n >>> d = cf.example_field(0).domain\n >>> print(d)\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(8) = [22.5, ..., 337.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n >>> d.match_by_construct(\"latitude\")\n True\n >>> d.match_by_construct(\"air_pressure\")\n False\n >>> d.match_by_construct(\"longitude\", \"time\")\n True\n >>> d.match_by_construct(longitude=22.5)\n True\n >>> d.match_by_construct(longitude=15.5)\n False\n >>> d.match_by_construct(longitude=cf.gt(340))\n False\n >>> d.match_by_construct(longitude=cf.gt(240))\n True\n >>> d.match_by_construct(time=cf.dt(\"2019-01-01\"))\n True\n >>> d.match_by_construct(time=cf.dt(\"2020-01-01\"))\n False\n\n \"\"\"\n if identities:\n if identities[0] == \"or\":\n _DEPRECATION_ERROR_ARG(\n self,\n \"match_by_construct\",\n \"or\",\n message=\"Use 'OR=True' instead.\",\n version=\"3.1.0\",\n ) # pragma: no cover\n\n if identities[0] == \"and\":\n _DEPRECATION_ERROR_ARG(\n self,\n \"match_by_construct\",\n \"and\",\n message=\"Use 'OR=False' instead.\",\n version=\"3.1.0\",\n ) # pragma: no cover\n\n if not identities and not conditions:\n return True\n\n constructs = self.constructs\n\n if not constructs:\n return False\n\n n = 0\n\n for identity in identities:\n filtered = constructs(identity)\n if filtered:\n n += 1\n elif not OR:\n return False\n\n if conditions:\n for identity, value in conditions.items():\n if self.subspace(\"test\", **{identity: value}):\n n += 1\n elif not OR:\n return False\n\n if OR:\n return bool(n)\n\n return True\n\n @_inplace_enabled(default=False)\n def roll(self, axis, shift, inplace=False):\n \"\"\"Roll the field along a cyclic axis.\n\n A unique axis is selected with 
the axes and kwargs parameters.\n\n .. versionadded:: 1.0\n\n .. seealso:: `anchor`, `axis`, `cyclic`, `iscyclic`, `period`\n\n :Parameters:\n\n axis:\n The cyclic axis to be rolled, defined by that which would\n be selected by passing the given axis description to a\n call of the field construct's `domain_axis` method. For\n example, for a value of ``'X'``, the domain axis construct\n returned by ``f.domain_axis('X')`` is selected.\n\n shift: `int`\n The number of places by which the selected cyclic axis is\n to be rolled.\n\n {{inplace: `bool`, optional}}\n\n :Returns:\n\n `Field`\n The rolled field.\n\n **Examples:**\n\n Roll the data of the \"X\" axis one elements to the right:\n\n >>> f.roll('X', 1)\n\n Roll the data of the \"X\" axis three elements to the left:\n\n >>> f.roll('X', -3)\n\n \"\"\"\n # TODODASK - allow multiple roll axes\n\n axis = self.domain_axis(\n axis,\n key=True,\n default=ValueError(\n f\"Can't roll {self.__class__.__name__}. \"\n f\"Bad axis specification: {axis!r}\"\n ),\n )\n\n d = _inplace_enabled_define_and_cleanup(self)\n\n # Roll the metadata constructs in-place\n axes = d._parse_axes(axis)\n d._roll_constructs(axes, shift)\n\n return d\n\n def subspace(self, *mode, **kwargs):\n \"\"\"Create indices that define a subspace of the domain\n construct.\n\n The indices returned by this method be used to create the subspace\n by passing them to the `subspace` method of the original domain\n construct.\n\n The subspace is defined by identifying indices based on the\n metadata constructs.\n\n Metadata constructs are selected conditions are specified on their\n data. Indices for subspacing are then automatically inferred from\n where the conditions are met.\n\n Metadata constructs and the conditions on their data are defined\n by keyword parameters.\n\n * Any domain axes that have not been identified remain unchanged.\n\n * Multiple domain axes may be subspaced simultaneously, and it\n doesn't matter which order they are specified in.\n\n * Explicit indices may also be assigned to a domain axis\n identified by a metadata construct, with either a Python `slice`\n object, or a sequence of integers or booleans.\n\n * For a dimension that is cyclic, a subspace defined by a slice or\n by a `Query` instance is assumed to \"wrap\" around the edges of\n the data.\n\n * Conditions may also be applied to multi-dimensional metadata\n constructs. The \"compress\" mode is still the default mode (see\n the positional arguments), but because the indices may not be\n acting along orthogonal dimensions, some missing data may still\n need to be inserted into the field construct's data.\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `indices`\n\n :Parameters:\n\n mode: `str`, *optional*\n There are two modes of operation, each of which provides\n indices for a different type of subspace:\n\n ============== ==========================================\n *mode* Description\n ============== ==========================================\n ``'compress'`` Return indices that identify only the\n requested locations.\n\n This is the default mode.\n\n Note that if a multi-dimensional metadata\n construct is being used to define the\n indices then some unrequested locations\n may also be selected.\n\n ``'envelope'`` The returned subspace is the smallest that\n contains all of the requested locations.\n\n ``'test'`` May be used on its own or in addition to\n one of the other positional arguments. 
Do\n not create a subspace, but return `True`\n or `False` depending on whether or not it\n is possible to create the specified\n subspace.\n ============== ==========================================\n\n kwargs: *optional*\n A keyword name is an identity of a metadata construct, and\n the keyword value provides a condition for inferring\n indices that apply to the dimension (or dimensions)\n spanned by the metadata construct's data. Indices are\n created that select every location for which the metadata\n construct's data satisfies the condition.\n\n :Returns:\n\n `Domain` or `bool`\n An independent domain construct containing the subspace of\n the original domain. If the ``'test'`` positional argument\n has been set then return `True` or `False` depending on\n whether or not it is possible to create specified\n subspace.\n\n **Examples:**\n\n >>> d = cf.example_field(0).domain\n >>> print(d)\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(8) = [22.5, ..., 337.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n >>> print(d.subspace(X=112.5))\n Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north\n : longitude(1) = [112.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> print(d.indices(X=112.5, Y=cf.wi(-60, 30)))\n Dimension coords: latitude(2) = [-45.0, 0.0] degrees_north\n : longitude(1) = [112.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n >>> print(d.indices(X=[-1, 0], Y=slice(1, -1))\n Dimension coords: latitude(3) = [-45.0, 0.0, 45.0] degrees_north\n : longitude(2) = [337.5, 22.5] degrees_east\n : time(1) = [2019-01-01 00:00:00]\n\n \"\"\"\n logger.debug(\n f\"{self.__class__.__name__}.subspace\\n\"\n f\" input kwargs = {kwargs}\"\n ) # pragma: no cover\n\n test = False\n if \"test\" in mode:\n mode = list(mode)\n mode.remove(\"test\")\n test = True\n\n if not mode and not kwargs:\n if test:\n return True\n\n return self.copy()\n\n try:\n indices = self.indices(*mode, **kwargs)\n except ValueError as error:\n if test:\n return False\n\n raise ValueError(error)\n\n if test:\n return True\n\n domain_axes = self.domain_axes(todict=True)\n\n axes = []\n shape = []\n indices2 = []\n for a, b in indices.items():\n axes.append(a)\n shape.append(domain_axes[a].get_size())\n indices2.append(b)\n\n indices, roll = parse_indices(\n tuple(shape), tuple(indices2), cyclic=True\n )\n\n logger.debug(\n f\" axes = {axes!r}\\n\"\n f\" parsed indices = {indices!r}\\n\"\n f\" roll = {roll!r}\"\n ) # pragma: no cover\n\n if roll:\n new = self\n cyclic_axes = self.cyclic()\n for iaxis, shift in roll.items():\n axis = axes[iaxis]\n if axis not in cyclic_axes:\n raise IndexError(\n \"Can't take a cyclic slice from non-cyclic \"\n f\"{self.constructs.domain_axis_identity(axis)!r} \"\n \"axis\"\n )\n\n new = new.roll(axis, shift)\n else:\n new = self.copy()\n\n # ------------------------------------------------------------\n # Set sizes of domain axes\n # ------------------------------------------------------------\n domain_axes = new.domain_axes(todict=True)\n for axis, index in zip(axes, indices):\n if isinstance(index, slice):\n old_size = domain_axes[axis].get_size()\n start, stop, step = index.indices(old_size)\n size = abs((stop - start) / step)\n int_size = round(size)\n if size > int_size:\n size = int_size + 1\n else:\n size = int_size\n else:\n size = np.size(index)\n\n domain_axes[axis].set_size(size)\n\n # ------------------------------------------------------------\n # Subspace constructs that have data\n # 
------------------------------------------------------------\n construct_data_axes = new.constructs.data_axes()\n\n for key, construct in new.constructs.filter_by_data().items():\n construct_axes = construct_data_axes[key]\n dice = [indices[axes.index(axis)] for axis in construct_axes]\n\n # Replace existing construct with its subspace\n new.set_construct(\n construct[tuple(dice)],\n key=key,\n axes=construct_axes,\n copy=False,\n )\n\n return new\n\n @_inplace_enabled(default=False)\n def transpose(self, axes, inplace=False):\n \"\"\"Permute the data axes of the metadata constructs.\n\n Each metadata construct has its data axis order changed to the\n relative ordering defined by the *axes* parameter. For instance,\n if the given *axes* are ``['X', 'Z', 'Y']`` then a metadata\n construct whose data axis order is ('Y', 'X') will be tranposed to\n have data order ('X', 'Y').\n\n .. versionadded:: 3.11.0\n\n .. seealso:: `domain_axis`, `flip`\n\n :Parameters:\n\n axes: sequence of `str`\n Define the new domain axis order.\n\n A domain axis is identified by that which would be\n selected by passing a given axis description to a call of\n the `domain_axis` method. For example, a value of ``'X'``\n would select the domain axis construct returned by\n ``f.domain_axis('X')``.\n\n Each domain axis of the domain construct data must be\n specified.\n\n constructs: `bool`, optional\n If True then metadata constructs are also transposed so\n that their axes are in the same relative order as in the\n transposed data array of the field. By default metadata\n constructs are not altered.\n\n {{inplace: `bool`, optional}}\n\n :Returns:\n\n `Domain` or `None`\n The domain construct with transposed constructs, or `None`\n if the operation was in-place.\n\n **Examples:**\n\n >>> d = cf.example_field(7).domain\n >>> print(d)\n Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian\n : air_pressure(1) = [850.0] hPa\n : grid_latitude(4) = [0.44, ..., -0.88] degrees\n : grid_longitude(5) = [-1.18, ..., 0.58] degrees\n Auxiliary coords: latitude(grid_latitude(4), grid_longitude(5)) = [[52.4243, ..., 51.1163]] degrees_north\n : longitude(grid_latitude(4), grid_longitude(5)) = [[8.0648, ..., 10.9238]] degrees_east\n Coord references: grid_mapping_name:rotated_latitude_longitude\n\n\n >>> print(d.transpose(['X', 'T', 'Y', 'Z']))\n Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian\n : air_pressure(1) = [850.0] hPa\n : grid_latitude(4) = [0.44, ..., -0.88] degrees\n : grid_longitude(5) = [-1.18, ..., 0.58] degrees\n Auxiliary coords: latitude(grid_longitude(5), grid_latitude(4)) = [[52.4243, ..., 51.1163]] degrees_north\n : longitude(grid_longitude(5), grid_latitude(4)) = [[8.0648, ..., 10.9238]] degrees_east\n Coord references: grid_mapping_name:rotated_latitude_longitude\n\n \"\"\"\n d = _inplace_enabled_define_and_cleanup(self)\n\n # Parse the axes\n if axes is None:\n raise ValueError(\n f\"Can't transpose {self.__class__.__name__}. \"\n f\"Must provide an order for all axes. Got: {axes}\"\n )\n\n axes = d._parse_axes(axes)\n\n rank = self.rank\n if len(set(axes)) != rank:\n raise ValueError(\n f\"Can't transpose {self.__class__.__name__}. \"\n f\"Must provide an unambiguous order for all \"\n f\"{rank} domain axes. 
Got: {axes}\"\n )\n\n data_axes = d.constructs.data_axes()\n for key, construct in d.constructs.filter_by_data().items():\n construct_axes = data_axes[key]\n\n if len(construct_axes) < 2:\n # No need to transpose 1-d constructs\n continue\n\n # Transpose the construct\n iaxes = [\n construct_axes.index(a) for a in axes if a in construct_axes\n ]\n construct.transpose(iaxes, inplace=True)\n\n # Update the axis order\n new_axes = [construct_axes[i] for i in iaxes]\n d.set_data_axes(axes=new_axes, key=key)\n\n return d\n"
] |
[
[
"numpy.size"
]
] |
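The row above captures cf-python's `Domain` construct (size, indices, subspace, flip, transpose, and related methods). As a quick orientation, here is a minimal, hedged usage sketch assembled from the examples already embedded in that file's docstrings; it assumes the `cf` package is importable and provides `cf.example_field` and `cf.wi`, as those docstrings do.

# Minimal sketch, assuming cf-python is installed; mirrors the docstring examples above.
import cf  # assumption: the cf-python package this Domain class belongs to

d = cf.example_field(0).domain                 # latitude(5), longitude(8), time(1)
print(d.size)                                  # 5 * 8 * 1 = 40 locations

indices = d.indices(X=112.5)                   # dict keyed by domain axis construct ids
print(d.subspace(**indices))                   # longitude(1) = [112.5] degrees_east
print(d.subspace(X=112.5, Y=cf.wi(-60, 30)))   # latitude(2), longitude(1), time(1)

The dictionary returned by `indices` is keyed by domain axis identifiers (e.g. 'domainaxis1'), which is why it can be splatted straight back into `subspace`.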
swipswaps/Object-Detection-API
|
[
"995f7d4cb41d3976f9264cbabcffd167a06dfa2a"
] |
[
"yolov3_tf2/models.py"
] |
[
"from absl import flags\r\nfrom absl.flags import FLAGS\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import Model\r\nfrom tensorflow.keras.layers import (\r\n Add,\r\n Concatenate,\r\n Conv2D,\r\n Input,\r\n Lambda,\r\n LeakyReLU,\r\n MaxPool2D,\r\n UpSampling2D,\r\n ZeroPadding2D,\r\n)\r\nfrom tensorflow.keras.regularizers import l2\r\nfrom tensorflow.keras.losses import (\r\n binary_crossentropy,\r\n sparse_categorical_crossentropy\r\n)\r\nfrom .batch_norm import BatchNormalization\r\nfrom .utils import broadcast_iou\r\n\r\n# customize your model through the following parameters\r\nyolo_max_boxes= 100 #maximum number of boxes detected per image\r\nyolo_iou_threshold = 0.5\r\nyolo_score_threshold = 0.5\r\n\r\n\r\n#flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')\r\n#flags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')\r\n\r\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\r\n (59, 119), (116, 90), (156, 198), (373, 326)],\r\n np.float32) / 416\r\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\r\n\r\nyolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),\r\n (81, 82), (135, 169), (344, 319)],\r\n np.float32) / 416\r\nyolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])\r\n\r\n\r\ndef DarknetConv(x, filters, size, strides=1, batch_norm=True):\r\n if strides == 1:\r\n padding = 'same'\r\n else:\r\n x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding\r\n padding = 'valid'\r\n x = Conv2D(filters=filters, kernel_size=size,\r\n strides=strides, padding=padding,\r\n use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)\r\n if batch_norm:\r\n x = BatchNormalization()(x)\r\n x = LeakyReLU(alpha=0.1)(x)\r\n return x\r\n\r\n\r\ndef DarknetResidual(x, filters):\r\n prev = x\r\n x = DarknetConv(x, filters // 2, 1)\r\n x = DarknetConv(x, filters, 3)\r\n x = Add()([prev, x])\r\n return x\r\n\r\n\r\ndef DarknetBlock(x, filters, blocks):\r\n x = DarknetConv(x, filters, 3, strides=2)\r\n for _ in range(blocks):\r\n x = DarknetResidual(x, filters)\r\n return x\r\n\r\n\r\ndef Darknet(name=None):\r\n x = inputs = Input([None, None, 3])\r\n x = DarknetConv(x, 32, 3)\r\n x = DarknetBlock(x, 64, 1)\r\n x = DarknetBlock(x, 128, 2) # skip connection\r\n x = x_36 = DarknetBlock(x, 256, 8) # skip connection\r\n x = x_61 = DarknetBlock(x, 512, 8)\r\n x = DarknetBlock(x, 1024, 4)\r\n return tf.keras.Model(inputs, (x_36, x_61, x), name=name)\r\n\r\n\r\ndef DarknetTiny(name=None):\r\n x = inputs = Input([None, None, 3])\r\n x = DarknetConv(x, 16, 3)\r\n x = MaxPool2D(2, 2, 'same')(x)\r\n x = DarknetConv(x, 32, 3)\r\n x = MaxPool2D(2, 2, 'same')(x)\r\n x = DarknetConv(x, 64, 3)\r\n x = MaxPool2D(2, 2, 'same')(x)\r\n x = DarknetConv(x, 128, 3)\r\n x = MaxPool2D(2, 2, 'same')(x)\r\n x = x_8 = DarknetConv(x, 256, 3) # skip connection\r\n x = MaxPool2D(2, 2, 'same')(x)\r\n x = DarknetConv(x, 512, 3)\r\n x = MaxPool2D(2, 1, 'same')(x)\r\n x = DarknetConv(x, 1024, 3)\r\n return tf.keras.Model(inputs, (x_8, x), name=name)\r\n\r\n\r\ndef YoloConv(filters, name=None):\r\n def yolo_conv(x_in):\r\n if isinstance(x_in, tuple):\r\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\r\n x, x_skip = inputs\r\n\r\n # concat with skip connection\r\n x = DarknetConv(x, filters, 1)\r\n x = UpSampling2D(2)(x)\r\n x = Concatenate()([x, x_skip])\r\n else:\r\n x = inputs = Input(x_in.shape[1:])\r\n\r\n x = DarknetConv(x, filters, 1)\r\n x = DarknetConv(x, filters * 2, 3)\r\n x = DarknetConv(x, filters, 1)\r\n x 
= DarknetConv(x, filters * 2, 3)\r\n x = DarknetConv(x, filters, 1)\r\n return Model(inputs, x, name=name)(x_in)\r\n return yolo_conv\r\n\r\n\r\ndef YoloConvTiny(filters, name=None):\r\n def yolo_conv(x_in):\r\n if isinstance(x_in, tuple):\r\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\r\n x, x_skip = inputs\r\n\r\n # concat with skip connection\r\n x = DarknetConv(x, filters, 1)\r\n x = UpSampling2D(2)(x)\r\n x = Concatenate()([x, x_skip])\r\n else:\r\n x = inputs = Input(x_in.shape[1:])\r\n x = DarknetConv(x, filters, 1)\r\n\r\n return Model(inputs, x, name=name)(x_in)\r\n return yolo_conv\r\n\r\n\r\ndef YoloOutput(filters, anchors, classes, name=None):\r\n def yolo_output(x_in):\r\n x = inputs = Input(x_in.shape[1:])\r\n x = DarknetConv(x, filters * 2, 3)\r\n x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)\r\n x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],\r\n anchors, classes + 5)))(x)\r\n return tf.keras.Model(inputs, x, name=name)(x_in)\r\n return yolo_output\r\n\r\n\r\ndef yolo_boxes(pred, anchors, classes):\r\n # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))\r\n grid_size = tf.shape(pred)[1]\r\n box_xy, box_wh, objectness, class_probs = tf.split(\r\n pred, (2, 2, 1, classes), axis=-1)\r\n\r\n box_xy = tf.sigmoid(box_xy)\r\n objectness = tf.sigmoid(objectness)\r\n class_probs = tf.sigmoid(class_probs)\r\n pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss\r\n\r\n # !!! grid[x][y] == (y, x)\r\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\r\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]\r\n\r\n box_xy = (box_xy + tf.cast(grid, tf.float32)) / \\\r\n tf.cast(grid_size, tf.float32)\r\n box_wh = tf.exp(box_wh) * anchors\r\n\r\n box_x1y1 = box_xy - box_wh / 2\r\n box_x2y2 = box_xy + box_wh / 2\r\n bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)\r\n\r\n return bbox, objectness, class_probs, pred_box\r\n\r\n\r\ndef yolo_nms(outputs, anchors, masks, classes):\r\n # boxes, conf, type\r\n b, c, t = [], [], []\r\n\r\n for o in outputs:\r\n b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))\r\n c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))\r\n t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))\r\n\r\n bbox = tf.concat(b, axis=1)\r\n confidence = tf.concat(c, axis=1)\r\n class_probs = tf.concat(t, axis=1)\r\n\r\n scores = confidence * class_probs\r\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\r\n boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),\r\n scores=tf.reshape(\r\n scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),\r\n max_output_size_per_class=yolo_max_boxes,\r\n max_total_size=yolo_max_boxes,\r\n iou_threshold=yolo_iou_threshold,\r\n score_threshold=yolo_score_threshold\r\n )\r\n\r\n return boxes, scores, classes, valid_detections\r\n\r\n\r\ndef YoloV3(size=None, channels=3, anchors=yolo_anchors,\r\n masks=yolo_anchor_masks, classes=80, training=False):\r\n x = inputs = Input([size, size, channels], name='input')\r\n\r\n x_36, x_61, x = Darknet(name='yolo_darknet')(x)\r\n\r\n x = YoloConv(512, name='yolo_conv_0')(x)\r\n output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)\r\n\r\n x = YoloConv(256, name='yolo_conv_1')((x, x_61))\r\n output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)\r\n\r\n x = YoloConv(128, name='yolo_conv_2')((x, x_36))\r\n output_2 = YoloOutput(128, 
len(masks[2]), classes, name='yolo_output_2')(x)\r\n\r\n if training:\r\n return Model(inputs, (output_0, output_1, output_2), name='yolov3')\r\n\r\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\r\n name='yolo_boxes_0')(output_0)\r\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\r\n name='yolo_boxes_1')(output_1)\r\n boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),\r\n name='yolo_boxes_2')(output_2)\r\n\r\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\r\n name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))\r\n\r\n return Model(inputs, outputs, name='yolov3')\r\n\r\n\r\ndef YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,\r\n masks=yolo_tiny_anchor_masks, classes=80, training=False):\r\n x = inputs = Input([size, size, channels], name='input')\r\n\r\n x_8, x = DarknetTiny(name='yolo_darknet')(x)\r\n\r\n x = YoloConvTiny(256, name='yolo_conv_0')(x)\r\n output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)\r\n\r\n x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))\r\n output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)\r\n\r\n if training:\r\n return Model(inputs, (output_0, output_1), name='yolov3')\r\n\r\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\r\n name='yolo_boxes_0')(output_0)\r\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\r\n name='yolo_boxes_1')(output_1)\r\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\r\n name='yolo_nms')((boxes_0[:3], boxes_1[:3]))\r\n return Model(inputs, outputs, name='yolov3_tiny')\r\n\r\n\r\ndef YoloLoss(anchors, classes=80, ignore_thresh=0.5):\r\n def yolo_loss(y_true, y_pred):\r\n # 1. transform all pred outputs\r\n # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))\r\n pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(\r\n y_pred, anchors, classes)\r\n pred_xy = pred_xywh[..., 0:2]\r\n pred_wh = pred_xywh[..., 2:4]\r\n\r\n # 2. transform all true outputs\r\n # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))\r\n true_box, true_obj, true_class_idx = tf.split(\r\n y_true, (4, 1, 1), axis=-1)\r\n true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2\r\n true_wh = true_box[..., 2:4] - true_box[..., 0:2]\r\n\r\n # give higher weights to small boxes\r\n box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]\r\n\r\n # 3. inverting the pred box equations\r\n grid_size = tf.shape(y_true)[1]\r\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\r\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)\r\n true_xy = true_xy * tf.cast(grid_size, tf.float32) - \\\r\n tf.cast(grid, tf.float32)\r\n true_wh = tf.math.log(true_wh / anchors)\r\n true_wh = tf.where(tf.math.is_inf(true_wh),\r\n tf.zeros_like(true_wh), true_wh)\r\n\r\n # 4. calculate all masks\r\n obj_mask = tf.squeeze(true_obj, -1)\r\n # ignore false positive when iou is over threshold\r\n best_iou = tf.map_fn(\r\n lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(\r\n x[1], tf.cast(x[2], tf.bool))), axis=-1),\r\n (pred_box, true_box, obj_mask),\r\n tf.float32)\r\n ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)\r\n\r\n # 5. 
calculate all losses\r\n xy_loss = obj_mask * box_loss_scale * \\\r\n tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)\r\n wh_loss = obj_mask * box_loss_scale * \\\r\n tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)\r\n obj_loss = binary_crossentropy(true_obj, pred_obj)\r\n obj_loss = obj_mask * obj_loss + \\\r\n (1 - obj_mask) * ignore_mask * obj_loss\r\n # TODO: use binary_crossentropy instead\r\n class_loss = obj_mask * sparse_categorical_crossentropy(\r\n true_class_idx, pred_class)\r\n\r\n # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)\r\n xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))\r\n wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))\r\n obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))\r\n class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))\r\n\r\n return xy_loss + wh_loss + obj_loss + class_loss\r\n return yolo_loss\r\n"
] |
[
[
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.squeeze",
"tensorflow.square",
"tensorflow.keras.layers.Add",
"tensorflow.math.is_inf",
"tensorflow.shape",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.exp",
"tensorflow.keras.Model",
"tensorflow.zeros_like",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.split",
"numpy.array",
"tensorflow.range",
"tensorflow.sigmoid",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.math.log",
"tensorflow.keras.layers.Input"
]
] |
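The yolov3_tf2/models.py row above builds the full YoloV3 and YoloV3Tiny graphs, with non-max suppression folded into the model when `training=False`. Below is a hedged inference sketch: the import path follows the file_path column, but the checkpoint path, the 416x416 input size, and the use of a random tensor in place of a preprocessed image are assumptions, not part of the source file.

# Hedged inference sketch; weights path and input size are assumptions.
import tensorflow as tf
from yolov3_tf2.models import YoloV3   # assumption: repository root is on sys.path

model = YoloV3(size=416, classes=80, training=False)
# model.load_weights('./checkpoints/yolov3.tf')   # assumed path to converted Darknet weights

img = tf.random.uniform((1, 416, 416, 3))         # stand-in for an image scaled to [0, 1]
boxes, scores, classes, valid = model(img)        # yolo_nms outputs when training=False
print(boxes.shape)                                # (1, 100, 4): yolo_max_boxes boxes per image
print(int(valid[0]))                              # number of valid detections after NMS

With `training=True` the same constructor instead returns the three raw grid outputs, which is what `YoloLoss` expects.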
phaustin/planck_lib
|
[
"01564ebf8bb0a028a5d26ff338a3652eb4c93f94"
] |
[
"src/planck_lib/planck.py"
] |
[
"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: -all\n# formats: ipynb,py:light\n# notebook_metadata_filter: all,-language_info,-toc,-latex_envs\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.1\n# latex_metadata:\n# chead: center head\n# lhead: left head\n# ---\n\nimport numpy as np\nfrom scipy.integrate import quad\nimport pdb\n\nclight = 2.99792458e+08 #m/s -- speed of light in vacumn\nh = 6.62606876e-34 #J s -- Planck's constant\nkb = 1.3806503e-23 # J/K -- Boltzman's constant\nc1 = 2. * h * clight**2.\nc2 = h * clight / kb\nc3 = 2 * h / clight**2.\nc4 = h / kb\n\nsigma = 2. * np.pi**5. * kb**4. / (15 * h**3. * clight**2.)\n\n\ndef planckDeriv(wavel, Temp):\n \"\"\"\n input: wavel in m, Temp in K\n output: dBlambda/dlambda W/m^2/m/sr/m\n \"\"\"\n expterm = np.exp(c2 / (wavel * Temp))\n deriv = c1 / np.pi * wavel**(-6.) * (expterm -\n 1)**(-2.) * c2 / Temp**2. * expterm\n return deriv\n\n\ndef planckwavelen(wavel, Temp):\n \"\"\"\n input: wavelength (m), Temp (K)\n output: planck function W/m^2/m/sr\n \"\"\"\n Blambda = c1 / (wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1))\n return Blambda\n\n\ndef planckfreq(freq, Temp):\n \"\"\"\n input: freq (Hz), Temp (K)\n output: planck function in W/m^2/Hz/sr\n \"\"\"\n Bfreq = c3 * freq**3. / (np.exp(c4 * freq / Temp) - 1)\n #pdb.set_trace()\n return Bfreq\n\n\ndef planckwavenum(waven, Temp):\n \"\"\"\n input: wavenumber (m^{-1}), Temp (K)\n output: planck function in W/m^2/m^{-1}/sr\n \"\"\"\n Bwaven = c1 * waven**3. / (np.exp(c2 * waven / Temp) - 1)\n return Bwaven\n\n\ndef planckInvert(wavel, Blambda):\n \"\"\"input wavelength in m and Blambda in W/m^2/m/sr, output\n output brightness temperature in K\n \"\"\"\n Tbright = c2 / (wavel * np.log(c1 / (wavel**5. 
* Blambda) + 1.))\n return Tbright\n\n\ndef planckInt(planck_fn, Temp, lower, upper):\n \"\"\"Integrate planckwavelen given temperatue Temp (K) from lower (m) to upper (m) wavelengths\n\n output: integrated radiance in W/m^2/sr\n see http://docs.scipy.org/doc/scipy-0.14.0/reference/integrate.html#module-scipy.integrate\n \"\"\"\n args = (Temp)\n integ = quad(planck_fn, lower, upper, args)\n return integ[0]\n\n\ndef goodInvert(T0, bbr, wavel):\n B0 = planckwavelen(wavel, T0)\n theDeriv = planckDeriv(wavel, T0)\n delB = bbr - B0\n delT = delB / theDeriv\n theT = T0 + delT\n return theT\n\n\ndef rootfind(T0, bbrVec, wavel):\n bbrVec = np.asarray(bbrVec)\n guess = planckwavelen(T0, wavel)\n out = []\n for bbr in bbrVec:\n while np.fabs(bbr - guess) > 1.e-8:\n delB = bbr - guess\n deriv = planckDeriv(wavel, T0)\n delT = delB / deriv\n T0 = T0 + delT\n guess = planckwavelen(wavel, T0)\n out.append(T0)\n return out\n\n\ndef test_planck_wavelen():\n \"\"\"\n test planck function for several wavelengths\n and Temps\n \"\"\"\n #\n # need Temp in K and wavelen in m\n #\n the_temps = [200., 250., 350.]\n the_wavelens = np.array([8., 10., 12.]) * 1.e-6\n out = []\n for a_temp in the_temps:\n for a_wavelen in the_wavelens:\n #\n # convert to W/m^2/micron/sr\n #\n the_bbr = planckwavelen(a_wavelen, a_temp) * 1.e-6\n out.append(the_bbr)\n answer = [\n 0.4521, 0.8954, 1.1955, 2.7324, 3.7835, 3.9883, 21.4495, 19.8525,\n 16.0931\n ]\n np.testing.assert_array_almost_equal(out, answer, decimal=4)\n return None\n\n\ndef test_planck_wavenum():\n \"\"\"\n test planck function for several wavelengths\n and Temps\n \"\"\"\n #\n # need Temp in K and wavelen in m\n #\n the_temps = [200., 250., 350.]\n the_wavelens = np.array([8., 10., 12.]) * 1.e-6\n the_wavenums = 1 / the_wavelens\n out = []\n for a_temp in the_temps:\n for a_wavenum in the_wavenums:\n #\n # convert to W/m^2/micron/sr\n #\n the_bbr = planckwavenum(a_wavenum, a_temp)\n out.append(the_bbr)\n answer = [\n 2.8932e-05, 8.9535e-05, 1.7215e-04, 1.7487e-04, 3.7835e-04, 5.7431e-04,\n 1.3728e-03, 1.9852e-03, 2.3174e-03\n ]\n np.testing.assert_array_almost_equal(out, answer, decimal=4)\n return None\n\n\ndef test_planck_freq():\n \"\"\"\n test planck function for several wavelengths\n and Temps\n \"\"\"\n #\n # need Temp in K and wavelen in m\n #\n the_temps = [200., 250., 350.]\n the_wavelens = np.array([8., 10., 12.]) * 1.e-6\n the_wavenums = 1 / the_wavelens\n the_freqs = the_wavenums * clight\n out = []\n for a_temp in the_temps:\n for a_freq in the_freqs:\n the_bbr = planckfreq(a_freq, a_temp)\n out.append(the_bbr)\n answer = [\n 9.6508e-14, 2.9866e-13, 5.7424e-13, 5.8331e-13, 1.2620e-12, 1.9157e-12,\n 4.5791e-12, 6.6221e-12, 7.7300e-12\n ]\n np.testing.assert_array_almost_equal(out, answer, decimal=4)\n return None\n\n\ndef test_planck_inverse():\n \"\"\"\n test planck inverse for several round trips\n and Temps\n \"\"\"\n #\n # need Temp in K and wavelen in m\n #\n the_temps = [200., 250., 350.]\n the_wavelens = np.array([8., 10., 12.]) * 1.e-6\n out = []\n for a_temp in the_temps:\n for a_wavelen in the_wavelens:\n #\n # convert to W/m^2/micron/sr\n #\n the_bbr = planckwavelen(a_wavelen, a_temp)\n out.append((a_wavelen, the_bbr))\n\n brights = []\n for wavelen, bbr in out:\n brights.append(planckInvert(wavelen, bbr))\n answer = [200.0, 200.0, 200.0, 250.0, 250.0, 250.0, 350.0, 350.0, 350.0]\n np.testing.assert_array_almost_equal(brights, answer, decimal=10)\n return None\n\n\ndef test_planck_wavelen_integral():\n \"\"\"\n integrage and compare with 
stefan-boltzman\n \"\"\"\n Temp = 300.\n stefan = sigma / np.pi * Temp**4.\n totrad = planckInt(planckwavelen, Temp, 1.e-7, 8000.e-6)\n np.testing.assert_almost_equal(totrad, stefan, decimal=5)\n return None\n\n\ndef test_planck_wavenum_integral():\n \"\"\"\n integrage and compare with stefan-boltzman\n \"\"\"\n Temp = 300.\n stefan = sigma / np.pi * Temp**4.\n left = 1. / 8000.e-6\n right = 1 / 1.e-7\n totrad = planckInt(planckwavenum, Temp, left, right)\n np.testing.assert_almost_equal(totrad, stefan, decimal=5)\n return None\n\n\ndef test_planck_freq_integral():\n \"\"\"\n integrage and compare with stefan-boltzman\n \"\"\"\n Temp = 300.\n stefan = sigma / np.pi * Temp**4.\n left = (1. / 8000.e-6) * clight\n right = (1 / 1.e-7) * clight\n print(left, right)\n totrad = planckInt(planckfreq, Temp, left, right)\n np.testing.assert_almost_equal(totrad, stefan, decimal=5)\n return None\n\n# this trick will run the following script if\n# the file planck.py is run as a program, but won't\n# if planck.py is imported from another module\n\n\nif __name__ == '__main__':\n\n test_planck_wavelen()\n test_planck_wavenum()\n test_planck_freq()\n test_planck_inverse()\n test_planck_wavelen_integral()\n test_planck_wavenum_integral()\n test_planck_freq_integral()\n"
] |
[
[
"numpy.log",
"numpy.asarray",
"numpy.testing.assert_almost_equal",
"scipy.integrate.quad",
"numpy.array",
"numpy.exp",
"numpy.fabs",
"numpy.testing.assert_array_almost_equal"
]
] |
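The planck.py row above is self-testing, so its expected behaviour is easy to reproduce. The sketch below, assuming the package import path implied by src/planck_lib/planck.py, exercises the same brightness-temperature round trip and Stefan-Boltzmann check as its tests.

# Minimal sketch mirroring the module's own tests; import path is an assumption.
import numpy as np
from planck_lib.planck import planckwavelen, planckInvert, planckInt, sigma

wavel = 10.e-6                        # wavelength in metres (10 microns)
Temp = 300.                           # temperature in kelvin
B = planckwavelen(wavel, Temp)        # spectral radiance, W/m^2/m/sr
print(planckInvert(wavel, B))         # brightness temperature, recovers ~300.0 K

# integrating the Planck function over wavelength recovers sigma/pi * T^4
total = planckInt(planckwavelen, Temp, 1.e-7, 8000.e-6)
print(total, sigma / np.pi * Temp ** 4.)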
ShichengChen/ChenPyLib
|
[
"758054279fac0d8bc103dc1dcf9e799f3b940da5"
] |
[
"cscPy/Nets/dVAEnet.py"
] |
[
"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.distributions.normal\nimport numpy as np\nimport math\nimport torchvision.models as models\nimport torch.nn.functional as F\nPOINT_SIZE = 256\nDecPointSize = 256\nDecUVSize = np.sqrt(DecPointSize)\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.distributions.normal\nimport numpy as np\nimport math\n\nclass SixLinear(nn.Module):\n def __init__(self,id,d=128):\n super(SixLinear,self).__init__()\n self.l = nn.Sequential(nn.Linear(id, d),nn.BatchNorm1d(d), nn.ReLU(),\n nn.Linear(d, d),nn.BatchNorm1d(d), nn.ReLU(),\n nn.Linear(d, d),nn.BatchNorm1d(d), nn.ReLU(),\n nn.Linear(d, d),nn.BatchNorm1d(d), nn.ReLU(),\n nn.Linear(d, d),nn.BatchNorm1d(d), nn.ReLU(),\n nn.Linear(d, d))\n def forward(self, x):return self.l(x)\n\nclass decCPoseNet(nn.Module):\n def __init__(self, n_latent=16):\n super(decCPoseNet, self).__init__()\n self.l=nn.Sequential(SixLinear(n_latent), nn.Linear(128, 21*3),)\n def forward(self, x):\n return self.l(x).view(-1,21*3)\nclass decPoseNet(nn.Module):\n def __init__(self, n_latent=32):\n super(decPoseNet, self).__init__()\n self.l=nn.Sequential(SixLinear(n_latent), nn.Linear(128, 21*3),)\n def forward(self, x):\n return self.l(x).view(-1,21*3)\nclass decViewNet(nn.Module):\n def __init__(self, n_latent=16):\n super(decViewNet, self).__init__()\n self.l=nn.Sequential(SixLinear(n_latent), nn.Linear(128, 3*3),)\n def forward(self, x):\n return self.l(x).view(-1,3, 3)\n\nclass encCPoseNet(nn.Module):\n def __init__(self, id=21*3,n_latent=16):\n super(encCPoseNet, self).__init__()\n self.l = nn.Sequential(SixLinear(id),nn.BatchNorm1d(128), nn.Linear(128, 128), nn.ReLU())\n self.l1 = nn.Sequential(nn.Linear(128, n_latent), nn.Tanh())\n self.l2 = nn.Sequential(nn.Linear(128, n_latent), nn.Softplus())\n def forward(self, x,training):\n x = self.l(x.view(-1, 21*3))\n mn = self.l1(x) * 2.0\n sd = torch.clamp(self.l2(x) + 1e-9, 1e-9, 100)\n m = torch.distributions.normal.Normal(torch.zeros_like(mn), torch.ones_like(mn))\n epsilon = m.sample()\n if training:\n z = mn + sd * epsilon\n else:\n z = mn\n return z, mn, sd\n\nclass encViewNet(nn.Module):\n def __init__(self, id=3*3,n_latent=16):\n super(encViewNet, self).__init__()\n self.l = nn.Sequential(SixLinear(id),nn.BatchNorm1d(128), nn.Linear(128, 128), nn.ReLU())\n self.l1 = nn.Sequential(nn.Linear(128, n_latent), nn.Tanh())\n self.l2 = nn.Sequential(nn.Linear(128, n_latent), nn.Softplus())\n def forward(self, x,training):\n x=self.l(x.view(-1,9))\n mn = self.l1(x) * 2.0\n sd = torch.clamp(self.l2(x) + 1e-9, 1e-9, 100)\n m = torch.distributions.normal.Normal(torch.zeros_like(mn), torch.ones_like(mn))\n epsilon = m.sample()\n if training:\n z = mn + sd * epsilon\n else:\n z = mn\n return z, mn, sd\n\nclass encoderRGB(nn.Module):\n def __init__(self, n_latent=32):\n super(encoderRGB, self).__init__()\n self.res=models.resnet18(pretrained=True).cuda()\n self.res.fc=nn.Sequential(nn.Linear(512,1000))\n self.l = nn.Sequential(nn.BatchNorm1d(1000), nn.Linear(1000, 128))\n self.l1 = nn.Sequential(nn.Linear(128, n_latent), nn.Tanh())\n self.l2 = nn.Sequential(nn.Linear(128, n_latent), nn.Softplus())\n def forward(self, x,training):\n x = self.res(x)\n x = self.l(x)\n mn = self.l1(x) * 2.0\n sd = torch.clamp(self.l2(x) + 1e-9, 1e-9, 100)\n m = torch.distributions.normal.Normal(torch.zeros_like(mn), torch.ones_like(mn))\n epsilon = m.sample()\n if training:\n z = mn + sd * epsilon\n else:\n z = mn\n return z, mn, 
sd\n\nimport utils\nclass VAE(nn.Module):\n def __init__(self, n_latent=32):\n super(VAE, self).__init__()\n self.enCpose=encCPoseNet()\n self.enV=encViewNet()\n self.enRGB=encoderRGB()\n self.dPose=decPoseNet()\n self.dCpose=decCPoseNet()\n self.dV=decViewNet()\n\n\n def forward(self, img,cpose,mat, phase,training):\n if(phase==0):\n zp,mnp,sdp=self.enCpose(cpose,training)\n zv,mnv,sdv=self.enV(mat,training)\n\n z=torch.cat([zp,zv],dim=1)\n pose=self.dPose(z)\n cpose=self.dCpose(zp)\n view=self.dV(zv)\n\n return mnp, sdp,mnv,sdv, pose, cpose, view\n else:\n z,mn,sd=self.enRGB(img,training)\n pose=self.dPose(z)\n cpose=self.dCpose(z[:,:16])\n view=self.dV(z[:,16:])\n return mn,sd,pose,cpose,view\n\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Softplus",
"numpy.sqrt",
"torch.cat",
"torch.zeros_like",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.ones_like"
]
] |
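The dVAEnet.py row wires a cross-modal VAE: an RGB encoder plus canonical-pose and view encoders feeding shared pose, canonical-pose and view decoders, switched by a `phase` flag. Below is a hedged smoke-test sketch; the import path, batch size, input resolution and CUDA availability are all assumptions, constructing the model downloads pretrained ResNet-18 weights and places them on the GPU (encoderRGB calls .cuda()), and the file also does a bare `import utils`, so a `utils` module must be importable.

# Hedged smoke test; shapes follow the forward() signature in the file above.
import torch
from cscPy.Nets.dVAEnet import VAE    # assumption: ChenPyLib root is on sys.path

model = VAE().cuda().train()                   # requires CUDA at construction time
B = 4                                          # BatchNorm1d needs batch size > 1 in training mode
img = torch.randn(B, 3, 224, 224).cuda()       # assumed RGB resolution, used in phase 1
cpose = torch.randn(B, 21 * 3).cuda()          # 21 joints x 3 coords, used in phase 0
mat = torch.randn(B, 3, 3).cuda()              # view rotation matrix, used in phase 0

mnp, sdp, mnv, sdv, pose, cpose_hat, view = model(img, cpose, mat, phase=0, training=True)
mn, sd, pose, cpose_hat, view = model(img, cpose, mat, phase=1, training=True)
print(pose.shape, view.shape)                  # torch.Size([4, 63]) torch.Size([4, 3, 3])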
liruilong940607/NSVF
|
[
"6733c8f3805c7febfe78c39710beafc5d33ccbcd"
] |
[
"fairnr/models/nsvf.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport cv2, math, time\nimport numpy as np\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq.models import (\n register_model,\n register_model_architecture\n)\nfrom fairseq.utils import item\nfrom fairnr.data.geometry import compute_normal_map, fill_in\nfrom fairnr.models.nerf import NeRFModel\n\n\n@register_model('nsvf')\nclass NSVFModel(NeRFModel):\n\n READER = 'image_reader'\n ENCODER = 'sparsevoxel_encoder'\n FIELD = 'radiance_field'\n RAYMARCHER = 'volume_rendering'\n\n @classmethod\n def add_args(cls, parser):\n super().add_args(parser)\n parser.add_argument('--fine-num-sample-ratio', type=float, default=0,\n help='raito of samples compared to the first pass')\n parser.add_argument('--inverse-distance-coarse-sampling', type=str,\n choices=['none', 'camera', 'origin'], default='none',\n help='if set, we do not sample points uniformly through voxels.')\n\n def intersecting(self, ray_start, ray_dir, encoder_states, **kwargs):\n S = ray_dir.size(0)\n ray_start, ray_dir, intersection_outputs, hits, _ = \\\n super().intersecting(ray_start, ray_dir, encoder_states, **kwargs)\n\n if self.reader.no_sampling and self.training: # sample points after ray-voxel intersection\n uv, size = kwargs['uv'], kwargs['size']\n mask = hits.reshape(*uv.size()[:2], uv.size(-1))\n\n # sample rays based on voxel intersections\n sampled_uv, sampled_masks = self.reader.sample_pixels(\n uv, size, mask=mask, return_mask=True)\n sampled_masks = sampled_masks.reshape(uv.size(0), -1).bool()\n hits, sampled_masks = hits[sampled_masks].reshape(S, -1), sampled_masks.unsqueeze(-1)\n intersection_outputs = {name: outs[sampled_masks.expand_as(outs)].reshape(S, -1, outs.size(-1))\n for name, outs in intersection_outputs.items()}\n ray_start = ray_start[sampled_masks.expand_as(ray_start)].reshape(S, -1, 3)\n ray_dir = ray_dir[sampled_masks.expand_as(ray_dir)].reshape(S, -1, 3)\n\n else:\n sampled_uv = None\n\n min_depth = intersection_outputs['min_depth']\n max_depth = intersection_outputs['max_depth']\n pts_idx = intersection_outputs['intersected_voxel_idx']\n dists = (max_depth - min_depth).masked_fill(pts_idx.eq(-1), 0)\n intersection_outputs['probs'] = dists / dists.sum(dim=-1, keepdim=True)\n if getattr(self.args, \"fixed_num_samples\", 0) > 0:\n intersection_outputs['steps'] = intersection_outputs['min_depth'].new_ones(\n *intersection_outputs['min_depth'].size()[:-1], 1) * self.args.fixed_num_samples\n else:\n intersection_outputs['steps'] = dists.sum(-1) / self.encoder.step_size\n return ray_start, ray_dir, intersection_outputs, hits, sampled_uv\n\n def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):\n samples, all_results = super().raymarching(ray_start, ray_dir, intersection_outputs, encoder_states, fine)\n all_results['voxel_edges'] = self.encoder.get_edge(ray_start, ray_dir, samples, encoder_states)\n all_results['voxel_depth'] = samples['sampled_point_depth'][:, 0]\n return samples, all_results\n\n def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):\n intersection_outputs = super().prepare_hierarchical_sampling(intersection_outputs, samples, all_results)\n if getattr(self.args, \"fine_num_sample_ratio\", 0) > 0:\n 
intersection_outputs['steps'] = samples['sampled_point_voxel_idx'].ne(-1).sum(-1).float() * self.args.fine_num_sample_ratio\n return intersection_outputs\n\n def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):\n # we need fill_in for NSVF for background\n S, V, P = sizes\n fullsize = S * V * P\n\n all_results['missed'] = fill_in((fullsize, ), hits, all_results['missed'], 1.0).view(S, V, P)\n all_results['colors'] = fill_in((fullsize, 3), hits, all_results['colors'], 0.0).view(S, V, P, 3)\n all_results['depths'] = fill_in((fullsize, ), hits, all_results['depths'], 0.0).view(S, V, P)\n\n BG_DEPTH = self.field.bg_color.depth\n bg_color = self.field.bg_color(all_results['colors'])\n all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)\n all_results['depths'] += all_results['missed'] * BG_DEPTH\n if 'normal' in all_results:\n all_results['normal'] = fill_in((fullsize, 3), hits, all_results['normal'], 0.0).view(S, V, P, 3)\n if 'voxel_depth' in all_results:\n all_results['voxel_depth'] = fill_in((fullsize, ), hits, all_results['voxel_depth'], BG_DEPTH).view(S, V, P)\n if 'voxel_edges' in all_results:\n all_results['voxel_edges'] = fill_in((fullsize, 3), hits, all_results['voxel_edges'], 1.0).view(S, V, P, 3)\n if 'feat_n2' in all_results:\n all_results['feat_n2'] = fill_in((fullsize,), hits, all_results['feat_n2'], 0.0).view(S, V, P)\n return all_results\n\n def add_other_logs(self, all_results):\n return {'voxs_log': item(self.encoder.voxel_size),\n 'stps_log': item(self.encoder.step_size),\n 'nvox_log': item(self.encoder.num_voxels)}\n\n def _visualize(self, images, sample, output, state, **kwargs):\n img_id, shape, view, width, name = state\n images = super()._visualize(images, sample, output, state, **kwargs)\n if 'voxel_edges' in output and output['voxel_edges'] is not None:\n # voxel hitting visualization\n images['{}_voxel/{}:HWC'.format(name, img_id)] = {\n 'img': output['voxel_edges'][shape, view].float(),\n 'min_val': 0,\n 'max_val': 1,\n 'weight':\n compute_normal_map(\n sample['ray_start'][shape, view].float(),\n sample['ray_dir'][shape, view].float(),\n output['voxel_depth'][shape, view].float(),\n sample['extrinsics'][shape, view].float().inverse(),\n width, proj=True)\n }\n\n if 'feat_n2' in output and output['feat_n2'] is not None:\n images['{}_featn2/{}:HWC'.format(name, img_id)] = {\n 'img': output['feat_n2'][shape, view].float(),\n 'min_val': 0,\n 'max_val': 1\n }\n return images\n\n @torch.no_grad()\n def prune_voxels(self, th=0.5, train_stats=False):\n self.encoder.pruning(self.field, th, train_stats=train_stats)\n self.clean_caches()\n\n @torch.no_grad()\n def split_voxels(self):\n logger.info(\"half the global voxel size {:.4f} -> {:.4f}\".format(\n self.encoder.voxel_size.item(), self.encoder.voxel_size.item() * .5))\n self.encoder.splitting()\n self.encoder.voxel_size *= .5\n self.encoder.max_hits *= 1.5\n self.clean_caches()\n\n @torch.no_grad()\n def reduce_stepsize(self):\n logger.info(\"reduce the raymarching step size {:.4f} -> {:.4f}\".format(\n self.encoder.step_size.item(), self.encoder.step_size.item() * .5))\n self.encoder.step_size *= .5\n\n def clean_caches(self, reset=False):\n self.encoder.clean_runtime_caches()\n if reset:\n self.encoder.reset_runtime_caches()\n torch.cuda.empty_cache() # cache release after Model do all things\n\n@register_model_architecture(\"nsvf\", \"nsvf_base\")\ndef base_architecture(args):\n # parameter needs to be changed\n args.voxel_size = getattr(args, 
\"voxel_size\", None)\n args.max_hits = getattr(args, \"max_hits\", 60)\n args.raymarching_stepsize = getattr(args, \"raymarching_stepsize\", 0.01)\n args.raymarching_stepsize_ratio = getattr(args, \"raymarching_stepsize_ratio\", 0.0)\n\n # encoder default parameter\n args.voxel_embed_dim = getattr(args, \"voxel_embed_dim\", 32)\n args.voxel_path = getattr(args, \"voxel_path\", None)\n args.initial_boundingbox = getattr(args, \"initial_boundingbox\", None)\n\n # field\n args.inputs_to_density = getattr(args, \"inputs_to_density\", \"emb:6:32\")\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, ray:4\")\n args.feature_embed_dim = getattr(args, \"feature_embed_dim\", 256)\n args.density_embed_dim = getattr(args, \"density_embed_dim\", 128)\n args.texture_embed_dim = getattr(args, \"texture_embed_dim\", 256)\n\n # API Update: fix the number of layers\n args.feature_layers = getattr(args, \"feature_layers\", 1)\n args.texture_layers = getattr(args, \"texture_layers\", 3)\n\n args.background_stop_gradient = getattr(args, \"background_stop_gradient\", False)\n args.background_depth = getattr(args, \"background_depth\", 5.0)\n\n # raymarcher\n args.discrete_regularization = getattr(args, \"discrete_regularization\", False)\n args.deterministic_step = getattr(args, \"deterministic_step\", False)\n args.raymarching_tolerance = getattr(args, \"raymarching_tolerance\", 0)\n args.use_octree = getattr(args, \"use_octree\", False)\n\n # reader\n args.pixel_per_view = getattr(args, \"pixel_per_view\", 2048)\n args.sampling_on_mask = getattr(args, \"sampling_on_mask\", 0.0)\n args.sampling_at_center = getattr(args, \"sampling_at_center\", 1.0)\n args.sampling_on_bbox = getattr(args, \"sampling_on_bbox\", False)\n args.sampling_patch_size = getattr(args, \"sampling_patch_size\", 1)\n args.sampling_skipping_size = getattr(args, \"sampling_skipping_size\", 1)\n\n # others\n args.chunk_size = getattr(args, \"chunk_size\", 64)\n args.valid_chunk_size = getattr(args, \"valid_chunk_size\", 64)\n\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyz\")\ndef nerf2_architecture(args):\n args.voxel_embed_dim = getattr(args, \"voxel_embed_dim\", 0)\n args.inputs_to_density = getattr(args, \"inputs_to_density\", \"pos:10\")\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, pos:10, ray:4\")\n base_architecture(args)\n\n\n@register_model_architecture(\"nsvf\", \"nsvf_nerf\")\ndef nerf_style_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, ray:4\")\n args.feature_layers = getattr(args, \"feature_layers\", 6)\n args.texture_layers = getattr(args, \"texture_layers\", 0)\n args.feature_field_skip_connect = getattr(args, \"feature_field_skip_connect\", 3)\n args.no_layernorm_mlp = getattr(args, \"no_layernorm_mlp\", True)\n nerf2_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_nerf_nov\")\ndef nerf_noview_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256\")\n nerf_style_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyzn\")\ndef nerf3_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, pos:10, normal:4, ray:4\")\n nerf2_architecture(args)\n\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyz_nope\")\ndef nerf3nope_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, pos:0:3, sigma:0:1, ray:4\")\n 
nerf2_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyzn_old\")\ndef nerfold_architecture(args):\n args.feature_layers = getattr(args, \"feature_layers\", 6)\n args.feature_field_skip_connect = getattr(args, \"feature_field_skip_connect\", 3)\n args.no_layernorm_mlp = getattr(args, \"no_layernorm_mlp\", True)\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, normal:0:3, sigma:0:1, ray:4\")\n nerf2_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyzn_nope\")\ndef nerf2nope_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, pos:0:3, normal:0:3, sigma:0:1, ray:4\")\n nerf2_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_xyzn_noz\")\ndef nerf3noz_architecture(args):\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"pos:10, normal:4, ray:4\")\n nerf2_architecture(args)\n\n@register_model_architecture(\"nsvf\", \"nsvf_embn\")\ndef nerf4_architecture(args):\n args.inputs_to_density = getattr(args, \"inputs_to_density\", \"emb:6:32\")\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, normal:4, ray:4\")\n base_architecture(args)\n\n\n@register_model_architecture(\"nsvf\", \"nsvf_emb0\")\ndef nerf5_architecture(args):\n args.voxel_embed_dim = getattr(args, \"voxel_embed_dim\", 384)\n args.inputs_to_density = getattr(args, \"inputs_to_density\", \"emb:0:384\")\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, ray:4\")\n base_architecture(args)\n\n\n@register_model('disco_nsvf')\nclass DiscoNSVFModel(NSVFModel):\n\n FIELD = \"disentangled_radiance_field\"\n\n\n@register_model_architecture(\"disco_nsvf\", \"disco_nsvf\")\ndef disco_nsvf_architecture(args):\n args.compressed_light_dim = getattr(args, \"compressed_light_dim\", 64)\n nerf3_architecture(args)\n\n\n@register_model('multi_disco_nsvf')\nclass mDiscoNSVFModel(NSVFModel):\n\n ENCODER = \"multi_sparsevoxel_encoder\"\n FIELD = \"disentangled_radiance_field\"\n\n\n@register_model_architecture(\"multi_disco_nsvf\", \"multi_disco_nsvf\")\ndef mdisco_nsvf_architecture(args):\n args.inputs_to_density = getattr(args, \"inputs_to_density\", \"pos:10, context:0:256\")\n args.inputs_to_texture = getattr(args, \"inputs_to_texture\", \"feat:0:256, pos:10, normal:4, ray:4, context:0:256\")\n disco_nsvf_architecture(args)\n\n\n@register_model('sdf_nsvf')\nclass SDFNSVFModel(NSVFModel):\n\n FIELD = \"sdf_radiance_field\"\n\n\n@register_model_architecture(\"sdf_nsvf\", \"sdf_nsvf\")\ndef sdf_nsvf_architecture(args):\n args.feature_layers = getattr(args, \"feature_layers\", 6)\n args.feature_field_skip_connect = getattr(args, \"feature_field_skip_connect\", 3)\n args.no_layernorm_mlp = getattr(args, \"no_layernorm_mlp\", True)\n nerf2nope_architecture(args)\n\n\n@register_model('sdf_nsvf_sfx')\nclass SDFSFXNSVFModel(SDFNSVFModel):\n\n FIELD = \"sdf_radiance_field\"\n RAYMARCHER = \"surface_volume_rendering\"\n\n\n@register_model_architecture(\"sdf_nsvf_sfx\", \"sdf_nsvf_sfx\")\ndef sdf_nsvfsfx_architecture(args):\n sdf_nsvf_architecture(args)\n"
] |
[
[
"torch.no_grad",
"torch.cuda.empty_cache"
]
] |
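A minimal sketch of the no-grad maintenance pattern used by the NSVF model above (prune_voxels / split_voxels followed by a cache release). ToyEncoder is a hypothetical stand-in for the sparse-voxel encoder; only the torch.no_grad / torch.cuda.empty_cache usage mirrors the row.

import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    """Hypothetical stand-in for the sparse-voxel encoder."""
    def __init__(self):
        super().__init__()
        self.voxel_size = nn.Parameter(torch.tensor(0.25), requires_grad=False)

encoder = ToyEncoder()

@torch.no_grad()                      # maintenance steps must not build an autograd graph
def split_voxels(enc):
    enc.voxel_size *= 0.5             # halve the global voxel size, as split_voxels() does

split_voxels(encoder)
if torch.cuda.is_available():
    torch.cuda.empty_cache()          # release cached GPU blocks after restructuring
print(encoder.voxel_size.item())      # 0.125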
aaronscherzinger/connectfour_reinforcementlearning
|
[
"084993edb4c1de44f14cf4e1b4ceced953bf688a"
] |
[
"test_trained_model.py"
] |
[
"import tensorflow as tf\n\nimport numpy as np \nimport connect_four\n\ndef board_to_column_vector(board):\n '''converts a playing board (i.e., 2D numpy array with shape [7,6]) to a column vector with shape [1, 42]'''\n assert(board.shape == (7,6)), \"board does not have shape (7,6)\"\n return np.reshape(board.flatten(), (1, 42))\n\n# create tensorflow session \ntest_session = tf.Session()\ntf.reset_default_graph()\n\n# import model\nimport_path = './current_model'\nmeta_graph_def = tf.saved_model.loader.load(\n test_session,\n [tf.saved_model.tag_constants.SERVING],\n import_path)\nsignature = meta_graph_def.signature_def\n\n# get input and output tensors\nsignature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\ninput_tensor_name = signature[signature_key].inputs[\"input_x\"].name\noutput_tensor_name = signature[signature_key].outputs[\"network_output\"].name\n\ninput_x = test_session.graph.get_tensor_by_name(input_tensor_name)\nnetwork_output = test_session.graph.get_tensor_by_name(output_tensor_name)\n\ntf.get_default_graph().finalize()\n\n\n# playing board as global variable\nplaying_board = connect_four.PlayingBoard()\nplaying_board.insert(5, 1)\nplaying_board.insert(4, 1)\nplaying_board.insert(3, 1)\nplaying_board.insert(1, -1)\nplaying_board.insert(1, -1)\nplaying_board.insert(0, -1)\n\nplaying_board.invert_board()\n\n# test\nfeed_dict = { input_x : board_to_column_vector(playing_board.get_board()) }\nestimates = test_session.run(network_output, feed_dict)[0]\n\nprint(estimates)\nplaying_board.print_board()\nprint(np.argmax(estimates))\n\nplaying_board.reset_board()\n\n# construct kind of an extreme test situation\n# 1. one full column (should yield and expection of -2)\nplayer = -1\nfor i in range(0, 6):\n playing_board.insert(6, player)\n player *= -1\n\n# 2. one column which allows us to win (should yield expectation of 1)\nplaying_board.insert(0, 1)\nplaying_board.insert(0, 1)\nplaying_board.insert(0, 1)\n\n# 3. one column where the opponent would win if we select anything else\nplaying_board.insert(2, -1)\nplaying_board.insert(2, -1)\nplaying_board.insert(2, -1)\n\n# we are player two -> one more of -1\n#playing_board.insert(3, -1)\n\n# test\nfeed_dict = { input_x : board_to_column_vector(playing_board.get_board()) }\nestimates = test_session.run(network_output, feed_dict)[0]\n\nprint(estimates)\nplaying_board.print_board()\nprint(np.argmax(estimates))\n"
] |
[
[
"tensorflow.reset_default_graph",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.get_default_graph",
"tensorflow.saved_model.loader.load"
]
] |
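A minimal sketch of the TF1-style SavedModel loading pattern in test_trained_model.py, written against tensorflow.compat.v1 so it also runs on TF2 installs; the './current_model' directory and the 'input_x' / 'network_output' signature keys are assumptions carried over from that script.

import numpy as np
import tensorflow.compat.v1 as tf   # TF1-style graph/session API
tf.disable_eager_execution()

export_dir = './current_model'      # assumed SavedModel directory

tf.reset_default_graph()
sess = tf.Session()
meta_graph = tf.saved_model.loader.load(
    sess, [tf.saved_model.tag_constants.SERVING], export_dir)
sig = meta_graph.signature_def[
    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

input_x = sess.graph.get_tensor_by_name(sig.inputs['input_x'].name)
output = sess.graph.get_tensor_by_name(sig.outputs['network_output'].name)
tf.get_default_graph().finalize()   # freeze the graph against further mutation

board = np.zeros((1, 42), dtype=np.float32)        # empty connect-four board as a row vector
estimates = sess.run(output, {input_x: board})[0]
print(np.argmax(estimates))                        # index of the highest-scoring column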
kopecmartin/grains-recognition
|
[
"72eade0f60800a6d3c9361bb74ff35e3445a9baf"
] |
[
"AABBlib/detection.py"
] |
[
"#!/usr/bin/env python3\nfrom fractions import Fraction\nfrom scipy import ndimage\nimport math\n\n\nclass Detector(object):\n \"\"\"Class Detector\n Detect bounded boxes\n \"\"\"\n def __init__(self, img):\n self.img = img\n\n def get_bounded_boxes(self):\n \"\"\"Get bounded boxes from thresholded image\"\"\"\n s = ndimage.generate_binary_structure(2, 2)\n labeled_arr, num_objects = ndimage.label(self.img, structure=s)\n\n dots = ndimage.find_objects(labeled_arr)\n\n bboxes = []\n for i, j in enumerate(dots):\n\n if (dots[i][0].start != 0 and\n dots[i][1].start != 0 and\n dots[i][0].stop < self.img.shape[0] and\n dots[i][1].stop < self.img.shape[1] and\n labeled_arr[j].shape[0] > 10 and\n labeled_arr[j].shape[1] > 10):\n\n garbage_arr, num_garbage = ndimage.label(labeled_arr[j],\n structure=s)\n garbage = ndimage.find_objects(garbage_arr)\n\n if len(garbage) > 1:\n for k, l in enumerate(garbage):\n if (not (garbage[k][0].start == 0 and\n garbage[k][1].start == 0 and\n garbage[k][0].stop == labeled_arr[j].shape[0] and\n garbage[k][1].stop == labeled_arr[j].shape[1])):\n garbage_arr[l] = 0\n bboxes.append(garbage_arr)\n else:\n bboxes.append(labeled_arr[j])\n\n return bboxes\n\n def _get_border_from_left(self, row):\n for i in range(0, len(row)):\n if row[i] != 0:\n # return index of the column where the\n # color is not background color (black)\n return i\n\n def _get_border_from_right(self, row):\n length = len(row)\n for i in range(length - 1, -1, -1):\n if row[i] != 0:\n return i\n\n def _get_border_from_top(self, c, matrix):\n for i in range(0, len(matrix)):\n if matrix[i, c] != 0:\n return i\n\n def _get_border_from_bottom(self, c, matrix):\n length = len(matrix)\n for i in range(length - 1, -1, -1):\n if matrix[i, c] != 0:\n return i\n\n def _append_if_not_in(self, what, to):\n if what not in to:\n to.append(what)\n return to\n\n def convex_hull(self, bbox):\n borders = []\n r_length = len(bbox)\n c_length = len(bbox[0]) # all rows has the same length\n\n for c in range(0, c_length):\n r = self._get_border_from_top(c, bbox)\n coordinates = [r, c]\n borders = self._append_if_not_in(coordinates, borders)\n\n r = self._get_border_from_bottom(c, bbox)\n coordinates = [r, c]\n borders = self._append_if_not_in(coordinates, borders)\n\n for r in range(0, r_length):\n c = self._get_border_from_left(bbox[r])\n coordinates = [r, c]\n borders = self._append_if_not_in(coordinates, borders)\n\n c = self._get_border_from_right(bbox[r])\n coordinates = [r, c]\n borders = self._append_if_not_in(coordinates, borders)\n\n return borders\n\n def _get_len(self, p1, p2):\n \"\"\"Get distance between 2 points\"\"\"\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\n def _get_lengths(self, edge_list):\n \"\"\"Get [{'length' : length, 'points': [[x1, y1], [x2, y2]]}]\"\"\"\n return [dict([('length', self._get_len(i, j)), ('points', [i, j])]) for i in edge_list for j in edge_list]\n\n def max_length(self, edge_list):\n lengths = self._get_lengths(edge_list)\n\n max_len = 0\n max_points = []\n for d in lengths:\n if d['length'] > max_len:\n max_len = d['length']\n max_points = d['points']\n\n return round(max_len, 2), max_points\n\n def _get_line_eq(self, points):\n \"\"\" Compute line equation from given two points\n \"\"\"\n # A x + B y + C = 0\n # (y1-y2)x + (x2-x1)y + (x1y2-x2y1) = 0\n # points = [[x1, y1], [x2, y2]]\n x1 = points[0] [0]\n y1 = points[0] [1]\n x2 = points[1] [0]\n y2 = points[1] [1]\n return {'a':(y1-y2), 'b':(x2-x1), 'c':(x1*y2-x2*y1)}\n\n def _is_below_line(self, line_eq, 
point):\n result = line_eq['a'] * point[0] + line_eq['b'] * point[1] + line_eq['c']\n if result > 0:\n return False\n else:\n return True\n\n def _is_normal(self, line_eq, normal_eq):\n \"\"\" True if lines are orthogonal\n \"\"\"\n if (line_eq['a'] * normal_eq['a'] + line_eq['a'] * normal_eq['b'] + line_eq['b'] * normal_eq['a'] + line_eq['b'] * normal_eq['b']) == 0:\n return True\n else:\n return False\n\n def _get_normal(self, line_eq, point):\n lamb = (-line_eq['c'] - (line_eq['a']*point[0]) - (line_eq['b']*point[1])) / (line_eq['a']*line_eq['a'] + line_eq['b']*line_eq['b'])\n x = point[0] + lamb * line_eq['a']\n y = point[1] + lamb * line_eq['b']\n return self._get_line_eq([point,[x, y]])\n\n def _is_uninterrupted(self, box, equation, x1, y1, x2, y2):\n normal = self._draw_line(x1, y1, x2, y2)\n for point in normal:\n if box[point[1]][point[0]] == 0:\n return False\n return True\n\n def _draw_line(self, x0, y0, x1, y1):\n points = []\n rev = reversed\n if abs(y1 - y0) <= abs(x1 - x0):\n x0, y0, x1, y1 = y0, x0, y1, x1\n rev = lambda x: x\n if x1 < x0:\n x0, y0, x1, y1 = x1, y1, x0, y0\n leny = abs(y1 - y0)\n for i in range(leny + 1):\n points.append([*rev((round(Fraction(i, leny) * (x1 - x0)) + x0, (1 if y1 > y0 else -1) * i + y0))])\n return points\n\n def _split_along_line(self, line_eq, points):\n points_below = []\n points_above = []\n for point in points:\n if self._is_below_line(line_eq, point):\n points_below.append(point)\n else:\n points_above.append(point)\n return points_below, points_above\n\n def max_thickness(self, points, edge, box):\n max_value = 0\n value = None\n\n line_eq = self._get_line_eq(points)\n\n edge_below, edge_above = self._split_along_line(line_eq, edge)\n\n normal_eq = self._get_normal(line_eq, edge_above[int(len(edge_above)/2)])\n\n normal_above_left, normal_above_rigth = self._split_along_line(normal_eq, edge_above)\n normal_below_left, normal_below_rigth = self._split_along_line(normal_eq, edge_below)\n\n for a in normal_above_left:\n for b in normal_below_left:\n propsed_normal = self._get_line_eq([a, b])\n if self._is_normal(line_eq, propsed_normal):\n if self._is_uninterrupted(box, propsed_normal, a[0], a[1], b[0], b[1]):\n value = self._get_len(a, b)\n if value > max_value:\n max_value = value\n\n for a in normal_above_rigth:\n for b in normal_below_rigth:\n propsed_normal = self._get_line_eq([a, b])\n if self._is_normal(line_eq, propsed_normal):\n if self._is_uninterrupted(box, propsed_normal, a[0], a[1], b[0], b[1]):\n value = self._get_len(a, b)\n if value > max_value:\n max_value = value\n\n return max_value\n"
] |
[
[
"scipy.ndimage.label",
"scipy.ndimage.generate_binary_structure",
"scipy.ndimage.find_objects"
]
] |
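A self-contained sketch of the 8-connected labelling step behind Detector.get_bounded_boxes, run on a tiny synthetic binary image instead of a thresholded grain photo.

import numpy as np
from scipy import ndimage

img = np.zeros((8, 8), dtype=np.uint8)
img[1:4, 1:4] = 1                                # first blob
img[5:7, 4:8] = 1                                # second blob

s = ndimage.generate_binary_structure(2, 2)      # 8-connectivity, as in the class above
labeled_arr, num_objects = ndimage.label(img, structure=s)
slices = ndimage.find_objects(labeled_arr)       # one (row slice, col slice) pair per label

print(num_objects)                               # 2
for sl in slices:
    print(labeled_arr[sl].shape)                 # bounding-box shape of each blob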
theoway/raster-vision
|
[
"fc181a6f31f085affa1ee12f0204bdbc5a6bf85a"
] |
[
"rastervision_core/rastervision/core/data/raster_source/rasterized_source.py"
] |
[
"import logging\n\nfrom rasterio.features import rasterize\nimport numpy as np\nfrom shapely.geometry import shape\nfrom shapely.strtree import STRtree\nfrom shapely.ops import transform\n\nfrom rastervision.core.data import (ActivateMixin, ActivationError)\nfrom rastervision.core.data.raster_source import RasterSource\n\nlog = logging.getLogger(__name__)\n\n\ndef geoms_to_raster(str_tree, rasterizer_config, window, extent):\n background_class_id = rasterizer_config.background_class_id\n all_touched = rasterizer_config.all_touched\n\n log.debug('Cropping shapes to window...')\n # Crop shapes against window, remove empty shapes, and put in window frame of\n # reference.\n window_geom = window.to_shapely()\n shapes = str_tree.query(window_geom)\n shapes = [(s, s.class_id) for s in shapes]\n shapes = [(s.intersection(window_geom), c) for s, c in shapes]\n shapes = [(s, c) for s, c in shapes if not s.is_empty]\n\n def to_window_frame(x, y, z=None):\n return (x - window.xmin, y - window.ymin)\n\n shapes = [(transform(to_window_frame, s), c) for s, c in shapes]\n log.debug('# of shapes in window: {}'.format(len(shapes)))\n\n out_shape = (window.get_height(), window.get_width())\n\n # rasterize needs to be passed >= 1 shapes.\n if shapes:\n log.debug('rasterio.rasterize()...')\n raster = rasterize(\n shapes,\n out_shape=out_shape,\n fill=background_class_id,\n dtype=np.uint8,\n all_touched=all_touched)\n else:\n raster = np.full(out_shape, background_class_id, dtype=np.uint8)\n\n return raster\n\n\nclass RasterizedSource(ActivateMixin, RasterSource):\n \"\"\"A RasterSource based on the rasterization of a VectorSource.\"\"\"\n\n def __init__(self, vector_source, rasterizer_config, extent,\n crs_transformer):\n \"\"\"Constructor.\n\n Args:\n vector_source: (VectorSource)\n rasterizer_config: (RasterizerConfig)\n extent: (Box) extent of corresponding imagery RasterSource\n crs_transformer: (CRSTransformer)\n \"\"\"\n self.vector_source = vector_source\n self.rasterizer_config = rasterizer_config\n self.extent = extent\n self.crs_transformer = crs_transformer\n self.activated = False\n\n super().__init__(channel_order=[0], num_channels=1)\n\n def get_extent(self):\n \"\"\"Return the extent of the RasterSource.\n\n Returns:\n Box in pixel coordinates with extent\n \"\"\"\n return self.extent\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return np.uint8\n\n def get_crs_transformer(self):\n \"\"\"Return the associated CRSTransformer.\"\"\"\n return self.crs_transformer\n\n def _get_chip(self, window):\n \"\"\"Return the chip located in the window.\n\n Polygons falling within the window are rasterized using the class_id, and\n the background is filled with background_class_id. 
Also, any pixels in the\n window outside the extent are zero, which is the don't-care class for\n segmentation.\n\n Args:\n window: Box\n\n Returns:\n [height, width, channels] numpy array\n \"\"\"\n if not self.activated:\n raise ActivationError('GeoJSONSource must be activated before use')\n\n log.debug('Rasterizing window: {}'.format(window))\n chip = geoms_to_raster(self.str_tree, self.rasterizer_config, window,\n self.get_extent())\n # Add third singleton dim since rasters must have >=1 channel.\n return np.expand_dims(chip, 2)\n\n def _activate(self):\n geojson = self.vector_source.get_geojson()\n geoms = []\n for f in geojson['features']:\n geom = shape(f['geometry'])\n geom.class_id = f['properties']['class_id']\n geoms.append(geom)\n self.str_tree = STRtree(geoms)\n self.activated = True\n\n def _deactivate(self):\n self.str_tree = None\n self.activated = False\n"
] |
[
[
"numpy.expand_dims",
"numpy.full"
]
] |
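A sketch of the no-intersection fallback in geoms_to_raster, where the window is filled with the background class id and _get_chip then appends a trailing channel axis; the window size used here is arbitrary.

import numpy as np

background_class_id = 0
out_shape = (256, 256)                     # hypothetical window (height, width)

raster = np.full(out_shape, background_class_id, dtype=np.uint8)
chip = np.expand_dims(raster, 2)           # (H, W) -> (H, W, 1), rasters need >= 1 channel
print(chip.shape)                          # (256, 256, 1)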
deepskies/DeeplyUncertain-Public
|
[
"04f94da29939b2d7dfef70d40dee8dc752e0da48"
] |
[
"models/mlp_tf.py"
] |
[
"import tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.internal import tensorshape_util\n\ntfk = tf.keras\ntfkl = tf.keras.layers\ntfpl = tfp.layers\ntfd = tfp.distributions\n\nn_train = 90000\n\n\nclass MeanMetricWrapper(tfk.metrics.Mean):\n # code by @mcourteaux from https://github.com/tensorflow/probability/issues/742#issuecomment-580433644\n def __init__(self, fn, name=None, dtype=None, **kwargs):\n super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)\n self._fn = fn\n self._fn_kwargs = kwargs\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n matches = self._fn(y_true, y_pred, **self._fn_kwargs)\n return super(MeanMetricWrapper, self).update_state(\n matches, sample_weight=sample_weight)\n\n def get_config(self):\n config = {}\n for k, v in six.iteritems(self._fn_kwargs):\n config[k] = K.eval(v) if is_tensor_or_variable(v) else v\n base_config = super(MeanMetricWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef scaled_kl_fn(a, b, _):\n \"\"\"\n idea from\n https://github.com/google-research/google-research/blob/9645220c865ab5603b377e6a98265631ece61d44/uq_benchmark_2019/uq_utils.py\n https://arxiv.org/pdf/1906.02530.pdf\n :param a: distribution\n :param b: distribution\n :return: scaled kl divergence\n \"\"\"\n return tfd.kl_divergence(a, b) / n_train\n\n\ndef mmd_from_dists(a, b, _):\n p = a.distribution\n q = b.distribution\n\n num_reduce_dims = (tensorshape_util.rank(a.event_shape) -\n tensorshape_util.rank(p.event_shape))\n gamma_sq = 0.5\n reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]\n for i in reduce_dims:\n gamma_sq *= a.event_shape[i]\n\n sigma_p = tf.convert_to_tensor(tf.square(p.scale))\n sigma_q = tf.convert_to_tensor(tf.square(q.scale))\n scale_pp = gamma_sq + 2 * sigma_p\n scale_qq = gamma_sq + 2 * sigma_q\n scale_cr = gamma_sq + sigma_p + sigma_q\n\n return tf.reduce_sum(\n tf.math.sqrt(gamma_sq / scale_pp) + tf.math.sqrt(gamma_sq / scale_qq)\n - 2 * tf.math.sqrt(gamma_sq / scale_cr) * tf.math.exp(\n -0.5 * tf.math.squared_difference(p.loc, q.loc) / scale_cr),\n axis=reduce_dims)\n\n\ndef negloglik(y_data, rv_y):\n return -rv_y.log_prob(y_data)\n\n\ndef negloglik_met(y_true, y_pred):\n return tf.reduce_mean(-y_pred.log_prob(tf.cast(y_true, tf.float32)))\n\n\ndef mlp(hidden_dim=100, n_layers=3, n_inputs=13, dropout_rate=0, loss='mse'):\n input_data = tfkl.Input((n_inputs,))\n x = input_data\n for _ in range(n_layers):\n x = tfkl.Dense(hidden_dim, activation='relu')(x)\n if dropout_rate > 0:\n x = tfkl.Dropout(dropout_rate)(x)\n\n if loss == 'mse':\n x = tfkl.Dense(1)(x)\n model = tfk.Model(input_data, x)\n model.compile(loss='mean_squared_error', optimizer=tf.optimizers.Adam())\n elif loss == 'nll':\n x = tfkl.Dense(2)(x)\n x = tfpl.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],\n scale=1e-3 + tf.math.softplus(t[..., 1:])))(x)\n model = tfk.Model(input_data, x)\n model.compile(optimizer=tf.optimizers.Adam(), loss=negloglik, metrics=['mse'])\n else:\n raise ValueError(f'Loss {loss} not implemented.')\n\n return model\n\n\ndef mlp_flipout(hidden_dim=100, n_layers=3, n_inputs=13, dropout_rate=0, kernel='kl'):\n input_img = tfkl.Input(n_inputs)\n x = input_img\n if kernel == 'kl':\n kernel_fn = scaled_kl_fn\n elif kernel == 'mmd':\n kernel_fn = mmd_from_dists\n else:\n raise ValueError(f'Kernel {kernel} not defined!')\n \n for _ in range(n_layers):\n x = tfpl.DenseFlipout(hidden_dim, activation='relu', 
kernel_divergence_fn=kernel_fn)(x)\n if dropout_rate > 0:\n x = tfkl.Dropout(dropout_rate)(x)\n x = tfpl.DenseFlipout(2, kernel_divergence_fn=kernel_fn)(x)\n x = tfpl.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],\n scale=1e-3 + tf.math.softplus(t[..., 1:])))(x)\n model = tfk.Model(input_img, x)\n\n model.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-4), loss=negloglik,\n metrics=['mse', MeanMetricWrapper(negloglik_met, name='nll')])\n\n return model\n"
] |
[
[
"tensorflow.math.sqrt",
"tensorflow.cast",
"tensorflow.math.squared_difference",
"tensorflow.math.softplus",
"tensorflow.square",
"tensorflow.optimizers.Adam"
]
] |
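A minimal sketch of the probabilistic head compiled in mlp(loss='nll'): a Dense(2) layer parameterises a Normal through DistributionLambda, with softplus keeping the scale positive. The hidden width is arbitrary and the loss is restated inline.

import tensorflow as tf
import tensorflow_probability as tfp

tfd, tfpl, tfkl = tfp.distributions, tfp.layers, tf.keras.layers

def negloglik(y_true, rv_y):
    # negative log-likelihood of the targets under the predicted Normal
    return -rv_y.log_prob(tf.cast(y_true, tf.float32))

inputs = tfkl.Input((13,))
h = tfkl.Dense(32, activation='relu')(inputs)
params = tfkl.Dense(2)(h)                          # [loc, pre-scale]
dist = tfpl.DistributionLambda(
    lambda t: tfd.Normal(loc=t[..., :1],
                         scale=1e-3 + tf.math.softplus(t[..., 1:])))(params)

model = tf.keras.Model(inputs, dist)
model.compile(optimizer=tf.optimizers.Adam(), loss=negloglik, metrics=['mse'])
model.summary()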
adrienatallah/autogluon
|
[
"014faf7e98bd0e349d0900f713442536b0f69b1e"
] |
[
"text/src/autogluon/text/text_prediction/dataset.py"
] |
[
"import collections\nimport collections.abc\nimport numpy as np\nimport pandas as pd\nimport json\nfrom autogluon.core.utils.loaders import load_pd\nfrom . import constants as _C\nfrom .column_property import CategoricalColumnProperty, TextColumnProperty, NumericalColumnProperty,\\\n get_column_properties_from_metadata\nfrom autogluon_contrib_nlp.base import INT_TYPES, FLOAT_TYPES, BOOL_TYPES\nfrom typing import List, Optional, Union, Dict, Tuple\n\n\ndef random_split_train_val(df, valid_ratio=0.15,\n stratified=False, label=None, num_repeats=1, rng=None):\n \"\"\"Randomly split a given dataset into train + valid dataset with stratified sampling.\n\n Parameters\n ----------\n df\n valid_ratio\n stratified\n Whether to use Stratified split.\n If it's a categorical column, we will split based on the categorical value.\n label\n The label column. Will be used if conducting stratified sampling\n num_repeats\n The number of repeats\n rng\n The random number generator\n\n Returns\n -------\n ret\n 1. num_repeats == 1\n train_dataset\n The split training dataset\n valid_dataset\n The split validation dataset\n 2. num_repeats > 1\n returns a list of (train_dataset, valid_dataset)\n \"\"\"\n if rng is None:\n rng = np.random.RandomState()\n if not stratified:\n num_total = len(df)\n num_valid = np.ceil(num_total * valid_ratio).astype('int')\n indices = np.arange(num_total)\n if num_repeats == 1:\n rng.shuffle(indices)\n valid_indices = indices[:num_valid]\n train_indices = indices[num_valid:]\n return df.iloc[train_indices], df.iloc[valid_indices]\n else:\n out = []\n for i in range(num_repeats):\n rng.shuffle(indices)\n valid_indices = indices[:num_valid]\n train_indices = indices[num_valid:]\n out.append((df.iloc[train_indices], df.iloc[valid_indices]))\n return out\n else:\n raise NotImplementedError('Currently, stratified sampling is not supported.')\n\n\ndef is_categorical_column(data: pd.Series,\n threshold: int = 100,\n ratio: float = 0.1,\n is_label_columns: bool = False,\n default_allow_missing: bool = True) -> Tuple[bool, bool]:\n \"\"\"Check whether the column is a categorical column.\n\n If the number of unique elements in the column is smaller than\n\n min(#Total Sample * ratio, threshold),\n\n it will be treated as a categorical column\n\n Parameters\n ----------\n data\n The column data\n threshold\n The threshold for detecting categorical column\n is_label_columns\n Whether the column is a label column\n ratio\n The ratio for detecting categorical column\n\n Returns\n -------\n is_categorical\n Whether the column is a categorical column\n parsed_allow_missing\n \"\"\"\n if data.dtype.name == 'category':\n return True, default_allow_missing\n threshold = min(int(len(data) * ratio), threshold)\n sample_set = set()\n element = data[data.first_valid_index()]\n if isinstance(element, str):\n for idx, sample in data.items():\n sample_set.add(sample)\n if len(sample_set) > threshold:\n return False, False\n if is_label_columns:\n return True, False\n else:\n return True, default_allow_missing\n elif isinstance(element, INT_TYPES):\n value_counts = data.value_counts()\n if value_counts.keys().min() == 0 and value_counts.keys().max() == len(value_counts) - 1:\n return True, False\n else:\n return False, False\n elif isinstance(element, BOOL_TYPES):\n return True, False\n else:\n return False, False\n\n\ndef get_column_properties(\n df: 'DataFrame',\n label_columns: Optional[Union[str, List[str]]],\n metadata: Optional[Dict] = None,\n provided_column_properties: Optional[Dict] = None,\n 
categorical_default_handle_missing_value: bool = True) -> collections.OrderedDict:\n \"\"\"Inference the column types of the data frame\n\n Parameters\n ----------\n df\n Pandas Dataframe\n label_columns\n The chosen column names of the table\n metadata\n The additional metadata object to help specify the column types\n {'col_name': {'type': type_string}}\n provided_column_properties\n The column properties provided.\n For example, these can be the column properties of the training set and you provide this\n to help infer the column properties of the dev/test set.\n categorical_default_handle_missing_value\n Whether to handle missing values for categorical columns by default\n\n Returns\n -------\n column_properties\n Dictionary of column properties\n\n \"\"\"\n if label_columns is None:\n label_columns_set = set()\n elif isinstance(label_columns, str):\n label_columns_set = set([label_columns])\n else:\n label_columns_set = set(label_columns)\n column_properties = collections.OrderedDict()\n # Process all feature columns\n column_properties_from_metadata = get_column_properties_from_metadata(metadata)\n for col_name in df.columns:\n if provided_column_properties is not None and col_name in provided_column_properties:\n column_properties[col_name] = provided_column_properties[col_name].clone()\n column_properties[col_name].parse(df[col_name])\n continue\n if col_name in column_properties_from_metadata:\n column_properties[col_name] = column_properties_from_metadata[col_name].clone()\n column_properties[col_name].parse(df[col_name])\n continue\n idx = df[col_name].first_valid_index()\n if idx is None:\n # No valid index, it should have been handled previously\n raise ValueError('Column Name=\"{}\" has no valid data and is ignored.'.format(col_name))\n ele = df[col_name][idx]\n # Try to inference the categorical column\n if isinstance(ele, collections.abc.Hashable) and not isinstance(ele, FLOAT_TYPES):\n # Try to tell if the column is a categorical column\n is_categorical, allow_missing = is_categorical_column(\n df[col_name],\n is_label_columns=col_name in label_columns_set)\n if is_categorical:\n column_properties[col_name] = CategoricalColumnProperty(allow_missing=allow_missing)\n column_properties[col_name].parse(df[col_name])\n continue\n if isinstance(ele, str):\n column_properties[col_name] = TextColumnProperty()\n column_properties[col_name].parse(df[col_name])\n continue\n # Raise error if we find an entity column\n if isinstance(ele, list):\n if isinstance(ele[0], (tuple, dict)):\n raise ValueError('An Entity column \"{}\" is found but no metadata is given.'\n .format(col_name))\n elif isinstance(ele, dict):\n raise ValueError('An Entity column \"{}\" is found but no metadata is given.'\n .format(col_name))\n column_properties[col_name] = NumericalColumnProperty()\n column_properties[col_name].parse(df[col_name])\n return column_properties\n\n\ndef normalize_df(df, convert_text_to_numerical=True, remove_none=True):\n \"\"\"Try to convert the text columns in the input data-frame to numerical columns\n\n Parameters\n ----------\n df\n The DataFrame\n convert_text_to_numerical\n Whether to convert text columns to numerical columns\n remove_none\n Whether to try to remove None values in the sample.\n\n Returns\n -------\n new_df\n The normalized dataframe\n \"\"\"\n conversion_cols = dict()\n for col_name in df.columns:\n col = df[col_name]\n idx = col.first_valid_index()\n if idx is not None:\n val = col[idx]\n if isinstance(val, str):\n num_missing = col.isnull().sum().sum().item()\n 
if num_missing > 0 and remove_none:\n col = col.fillna('')\n conversion_cols[col_name] = col\n if convert_text_to_numerical:\n try:\n new_col = pd.to_numeric(col)\n conversion_cols[col_name] = new_col\n except Exception:\n pass\n finally:\n pass\n if len(conversion_cols) == 0:\n return df\n else:\n new_df = df.copy()\n for col_name in conversion_cols:\n new_df[col_name] = conversion_cols[col_name]\n return new_df\n\n\ndef infer_problem_type(column_properties, label_col_name):\n \"\"\"\n\n Parameters\n ----------\n column_properties\n The property of the columns\n label_col_name\n Name of the label column\n\n Returns\n -------\n problem_type\n Type of the problem\n label_shape\n Shape of the label\n \"\"\"\n if column_properties[label_col_name].type == _C.CATEGORICAL:\n return _C.CLASSIFICATION, column_properties[label_col_name].num_class\n elif column_properties[label_col_name].type == _C.NUMERICAL:\n return _C.REGRESSION, column_properties[label_col_name].shape\n else:\n raise NotImplementedError('Cannot infer the problem type')\n\n\nclass TabularDataset:\n def __init__(self, path_or_df: Union[str, pd.DataFrame],\n *,\n columns=None,\n label_columns=None,\n column_metadata: Optional[Union[str, Dict]] = None,\n column_properties: Optional[collections.OrderedDict] = None,\n categorical_default_handle_missing_value=True):\n \"\"\"\n\n Parameters\n ----------\n path_or_df\n The path or dataframe of the tabular dataset for NLP.\n columns\n The chosen columns to load the data\n label_columns\n The name of the label columns. This helps to infer the column properties.\n column_metadata\n The metadata object that describes the property of the columns in the dataset\n column_properties\n The given column properties\n categorical_default_handle_missing_value\n Whether to handle missing value in categorical columns by default\n \"\"\"\n super().__init__()\n if isinstance(path_or_df, pd.DataFrame):\n df = path_or_df\n else:\n df = load_pd.load(path_or_df)\n if columns is not None:\n if not isinstance(columns, list):\n columns = [columns]\n df = df[columns]\n df = normalize_df(df)\n if column_metadata is None:\n column_metadata = dict()\n elif isinstance(column_metadata, str):\n with open(column_metadata, 'r') as f:\n column_metadata = json.load(f)\n # Inference the column properties\n column_properties = get_column_properties(\n df,\n metadata=column_metadata,\n label_columns=label_columns,\n provided_column_properties=column_properties,\n categorical_default_handle_missing_value=categorical_default_handle_missing_value)\n for col_name, prop in column_properties.items():\n if prop.type == _C.TEXT:\n df[col_name] = df[col_name].fillna('').apply(str)\n elif prop.type == _C.NUMERICAL:\n df[col_name] = df[col_name].fillna(-1).apply(np.array)\n self._table = df\n self._column_properties = column_properties\n\n @property\n def columns(self):\n return list(self._table.columns)\n\n @property\n def table(self):\n return self._table\n\n @property\n def column_properties(self):\n return self._column_properties\n\n def __str__(self):\n ret = 'Columns:\\n\\n'\n for col_name in self.column_properties.keys():\n ret += '- ' + str(self.column_properties[col_name])\n ret += '\\n'\n return ret\n"
] |
[
[
"numpy.arange",
"numpy.random.RandomState",
"pandas.to_numeric",
"numpy.ceil"
]
] |
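A minimal sketch of the non-stratified branch of random_split_train_val and of the pd.to_numeric conversion attempted by normalize_df; the toy frame and split ratio are hypothetical.

import numpy as np
import pandas as pd

df = pd.DataFrame({'text': [f'row {i}' for i in range(10)], 'label': list(range(10))})
valid_ratio, rng = 0.3, np.random.RandomState(0)

num_valid = int(np.ceil(len(df) * valid_ratio))
indices = np.arange(len(df))
rng.shuffle(indices)                         # in-place shuffle of the row indices

valid_df = df.iloc[indices[:num_valid]]
train_df = df.iloc[indices[num_valid:]]
print(len(train_df), len(valid_df))          # 7 3

# normalize_df tries to turn string columns into numbers; failures are silently ignored.
print(pd.to_numeric(pd.Series(['1', '2', '3'])).dtype)   # int64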
epn-ml/IWF-ICMEs
|
[
"85390a3990462cb8aa9639956acc353134a224ce"
] |
[
"event.py"
] |
[
"import pandas as pds\nimport datetime\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport seaborn as sns\n\n\nclass Event:\n\n def __init__(self, begin, end, param=None):\n self.begin = begin\n self.end = end\n self.proba = None\n self.duration = self.end-self.begin\n\n def __eq__(self, other):\n '''\n return True if other overlaps self during 65/100 of the time\n '''\n return overlap(self, other) > 0.65*self.duration\n\n def __str__(self):\n return \"{} ---> {}\".format(self.begin, self.end)\n\n def get_Proba(self, y):\n '''\n Give the mean probability of the event following the list\n of event predicted probability y\n '''\n self.proba = y[self.begin:self.end].mean()\n\n def get_data(self, df):\n self.param = df\n\n\n def iwfplot(self, data, delta, i, typ, predstart, predend):\n return plot_insitu_icmecat_mag_plasma(data, self.begin, self.end, delta, i, typ, predstart, predend)\n \n def iwfplotnopred(self, data, delta, i, typ):\n return plot_insitu_icmecat_mag_plasma_nopred(data, self.begin, self.end, delta, i, typ)\n \n def heatplot(self, data, delta, i, typ, prediction, similarities):\n return heatplot(data, self.begin, self.end, delta, i, typ, prediction, similarities)\n\n def getValue(self, df, feature):\n '''\n for a given df, return the mean of a given feature during the events\n '''\n return df[feature][self.begin:self.end].mean()\n \n def plotinspect(self,data,delta,i,typ,spacecraft):\n return plotinspecter(data, self.begin, self.end, delta, i, typ, spacecraft)\n \n \ndef plotinspecter(data, start, end, delta, i, typ,spacecraft):\n \n \n sns.set_style('darkgrid')\n sns.set_context('paper')\n \n fig=plt.figure()\n \n data = data[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)]\n \n #sharex means that zooming in works with all subplots\n ax1 = plt.subplot(111) \n\n ax1.plot_date(data.index, data[typ],'-r',label=typ,linewidth=0.5)\n \n #plot vertical lines\n ax1.plot_date([start,start],[-500,500],'-k',label = 'event',linewidth=1) \n ax1.plot_date([end,end],[-500,500],'-k',linewidth=1) \n \n plt.ylabel(typ)\n plt.legend(loc=3,ncol=4,fontsize=8)\n \n ax1.set_ylim(-np.nanmax(data[typ])-5,np.nanmax(data[typ])+5) \n \n \n plt.setp(ax1.get_xticklabels(), visible=False)\n\n plt.title(spacecraft+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\"))\n \n plt.tight_layout()\n plt.show()\n\n\n #plotfile=typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\")+'.png'\n \n #plt.savefig(plotfile)\n #print('saved as ',plotfile) \n \ndef overlap(event1, event2):\n '''return the time overlap between two events as a timedelta'''\n delta1 = min(event1.end, event2.end)\n delta2 = max(event1.begin, event2.begin)\n return max(delta1-delta2,\n datetime.timedelta(0))\n\ndef isInList(ref_event, event_list, thres):\n '''\n returns True if ref_event is overlapped thres percent of its duration by\n at least one elt in event_list\n '''\n return max(overlapWithList(ref_event,event_list)) > thres*ref_event.duration\n\n\ndef find(ref_event, event_list, thres, choice='first'):\n '''\n Return the event in event_list that overlap ref_event for a given threshold\n if it exists\n Choice give the preference of returned :\n first return the first of the lists\n Best return the one with max overlap\n merge return the combination of all of them\n '''\n if isInList(ref_event, event_list, thres):\n return(choseEventFromList(ref_event, 
event_list, choice))\n else:\n return None\n \ndef similarity(event1, event2):\n if event1 is None:\n return 0\n inter = overlap(event1, event2)\n return inter/(event1.duration+event2.duration-inter)\n\n \ndef read_cat(begin, end, iwinind, dateFormat=\"%Y/%m/%d %H:%M\",\n sep=',', get_proba=False):\n \n '''\n get indices of events by different spacecraft\n '''\n evtList = []\n begin = pds.to_datetime(begin, format=dateFormat)\n end = pds.to_datetime(end, format=dateFormat)\n for i in iwinind:\n if (begin[i] < datetime.datetime(2021,2,3)):\n evtList.append(Event(begin[i], end[i]))\n if get_proba is True:\n for i, elt in enumerate(evtList):\n elt.proba = df['proba'][i]\n return evtList\n\n\ndef get_similarity(index, width, evtList):\n '''\n For a given list of event and a given window size (in hours) and\n a datetime index, return the associated serie of similarities\n '''\n y = np.zeros(len(index))\n for i, date in enumerate(index):\n window = Event(date-datetime.timedelta(hours=int(width)/2),\n date+datetime.timedelta(hours=int(width)/2))\n seum = [similarity(x, window)for x in evtList if (window.begin < x.end) and (window.end > x.begin)]\n if len(seum) > 0:\n y[i] = max(seum)\n return pds.Series(index=index, data=y)\n\ndef overlapWithList(ref_event, event_list, percent=False):\n '''\n return the list of the overlaps between an event and the elements of\n an event list\n Have the possibility to have it as the percentage of fthe considered event\n in the list\n '''\n if percent:\n return [overlap(ref_event, elt)/elt.duration for elt in event_list]\n else:\n return [overlap(ref_event, elt) for elt in event_list]\n\n\ndef choseEventFromList(ref_event, event_list, choice='first'):\n '''\n return an event from even_list according to the choice adopted\n first return the first of the lists\n last return the last of the lists\n best return the one with max overlap\n merge return the combination of all of them\n '''\n if choice == 'first':\n return event_list[0]\n if choice == 'last':\n return event_list[-1]\n if choice == 'best':\n return event_list[np.argmax(overlapWithList(ref_event, event_list))]\n if choice == 'merge':\n return evt.merge(event_list[0], event_list[-1])\n\n\ndef forceAspect(ax,aspect):\n im = ax.get_images()\n extent = im[0].get_extent()\n ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)\n\n \ndef heatplot(data, start, end, delta, i, typ, prediction, similarities):\n\n sns.set_style('darkgrid')\n sns.set_context('paper')\n \n fig=plt.figure(figsize=(9,6), dpi=150)\n \n similarities.index = pds.to_datetime(similarities.index)\n\n data = data[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)]\n\n #sharex means that zooming in works with all subplots\n ax1 = plt.subplot(311) \n\n ax1.plot_date(data.index, data['bx'],'-r',label='Bx',linewidth=0.5)\n ax1.plot_date(data.index, data['by'],'-g',label='By',linewidth=0.5)\n ax1.plot_date(data.index, data['bz'],'-b',label='Bz',linewidth=0.5)\n ax1.plot_date(data.index, data['bt'],'-k',label='Btotal',lw=0.5)\n \n #plot vertical lines\n ax1.plot_date([start,start],[-500,500],'-k',linewidth=1) \n ax1.plot_date([end,end],[-500,500],'-k',linewidth=1)\n \n plt.ylabel('B [nT]')\n plt.legend(loc=3,ncol=4,fontsize=8)\n \n# try:\n ax1.set_ylim(-np.nanmax(data['bt'])-5,np.nanmax(data['bt'])+5) \n# except ValueError: #raised if `y` is empty.\n# pass\n \n plt.setp(ax1.get_xticklabels(), visible=False)\n\n plt.title(typ+' start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d 
%H:%M\"))\n \n ax2 = plt.subplot(312) \n \n \n im = pds.DataFrame(prediction[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)])\n \n im.index = data.index\n \n ax2.imshow(im.T, cmap='cividis')\n \n plt.setp(ax2.get_xticklabels(), visible=False)\n plt.setp(ax2.get_yticklabels(), visible=False)\n plt.ylabel('Predicted Similarity')\n\n forceAspect(ax2,aspect=9)\n \n\n \n\n ax3 = plt.subplot(313) \n sim = pds.DataFrame(similarities[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)])\n\n \n ax3.imshow(sim.T, cmap='cividis')\n \n plt.setp(ax3.get_xticklabels(), visible=False)\n plt.setp(ax3.get_yticklabels(), visible=False)\n \n print(type(sim))\n \n plt.ylabel('Expected Similarity')\n\n forceAspect(ax3,aspect=9)\n \n ax1.get_shared_y_axes().join(ax2,ax3)\n\n plt.tight_layout()\n plt.show()\n\n\n #plotfile=typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\")+'.png'\n \n #plt.savefig(plotfile)\n #print('saved as ',plotfile)\n \ndef plot_insitu_icmecat_mag_plasma_nopred(data, start, end, delta, i, typ):\n \n \n sns.set_style('darkgrid')\n sns.set_context('paper')\n \n fig=plt.figure(figsize=(9,6), dpi=150)\n \n data = data[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)]\n \n #sharex means that zooming in works with all subplots\n ax1 = plt.subplot(411) \n\n ax1.plot_date(data.index, data['bx'],'-r',label='Bx',linewidth=0.5)\n ax1.plot_date(data.index, data['by'],'-g',label='By',linewidth=0.5)\n ax1.plot_date(data.index, data['bz'],'-b',label='Bz',linewidth=0.5)\n ax1.plot_date(data.index, data['bt'],'-k',label='Btotal',lw=0.5)\n \n #plot vertical lines\n ax1.plot_date([start,start],[-500,500],'-k',linewidth=1) \n ax1.plot_date([end,end],[-500,500],'-k',linewidth=1)\n \n plt.ylabel('B [nT]')\n plt.legend(loc=3,ncol=4,fontsize=8)\n \n ax1.set_ylim(-np.nanmax(data['bt'])-5,np.nanmax(data['bt'])+5) \n \n \n plt.setp(ax1.get_xticklabels(), visible=False)\n\n plt.title(typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\"))\n \n ax2 = plt.subplot(412,sharex=ax1) \n ax2.plot_date(data.index,data['vt'],'-k',label='V',linewidth=0.7)\n \n\n #plot vertical lines\n ax2.plot_date([start,start],[0,3000],'-k',linewidth=1) \n ax2.plot_date([end,end],[0,3000],'-k',linewidth=1)\n\n\n plt.ylabel('V [km/s]')\n \n #check plasma data exists\n if np.isnan(np.nanmin(data['vt']))==False:\n ax2.set_ylim(np.nanmin(data['vt'])-20,np.nanmax(data['vt'])+100) \n \n \n plt.setp(ax2.get_xticklabels(), visible=False)\n\n\n ax3 = plt.subplot(413,sharex=ax1) \n ax3.plot_date(data.index,data['np'],'-k',label='Np',linewidth=0.7)\n \n #plot vertical lines\n ax3.plot_date([start,start],[0,1000],'-k',linewidth=1) \n ax3.plot_date([end,end],[0,1000],'-k',linewidth=1)\n\n plt.ylabel('N [ccm-3]')\n \n if np.isnan(np.nanmin(data['np']))==False:\n ax3.set_ylim(0,np.nanmax(data['np'])+10) \n \n \n plt.setp(ax3.get_xticklabels(), visible=False)\n\n\n ax4 = plt.subplot(414,sharex=ax1) \n ax4.plot_date(data.index,data['tp']/1e6,'-k',label='Tp',linewidth=0.7)\n \n #plot vertical lines\n ax4.plot_date([start,start],[0,10],'-k',linewidth=1) \n ax4.plot_date([end,end],[0,10],'-k',linewidth=1)\n\n\n plt.ylabel('T [MK]')\n \n if np.isnan(np.nanmin(data['tp']))==False:\n ax4.set_ylim(0,np.nanmax(data['tp']/1e6)+0.2) \n\n \n \n plt.tight_layout()\n plt.show()\n\n\n #plotfile=typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d 
%H:%M\")+'.png'\n \n #plt.savefig(plotfile)\n #print('saved as ',plotfile)\n \n \ndef plot_insitu_icmecat_mag_plasma(data, start, end, delta, i, typ, predstart, predend):\n \n \n sns.set_style('darkgrid')\n sns.set_context('paper')\n \n fig=plt.figure(figsize=(9,6), dpi=150)\n \n data = data[start-datetime.timedelta(hours=delta):\n end+datetime.timedelta(hours=delta)]\n \n #sharex means that zooming in works with all subplots\n ax1 = plt.subplot(411) \n\n ax1.plot_date(data.index, data['bx'],'-r',label='Bx',linewidth=0.5)\n ax1.plot_date(data.index, data['by'],'-g',label='By',linewidth=0.5)\n ax1.plot_date(data.index, data['bz'],'-b',label='Bz',linewidth=0.5)\n ax1.plot_date(data.index, data['bt'],'-k',label='Btotal',lw=0.5)\n \n #plot vertical lines\n ax1.plot_date([start,start],[-500,500],'-k',label = 'true event',linewidth=1) \n ax1.plot_date([end,end],[-500,500],'-k',linewidth=1)\n ax1.plot_date([predstart,predstart],[-500,500],'-r',label = 'predicted event',linewidth=1) \n ax1.plot_date([predend,predend],[-500,500],'-r',linewidth=1) \n \n plt.ylabel('B [nT]')\n plt.legend(loc=3,ncol=4,fontsize=8)\n \n ax1.set_ylim(-np.nanmax(data['bt'])-5,np.nanmax(data['bt'])+5) \n \n \n plt.setp(ax1.get_xticklabels(), visible=False)\n\n plt.title(typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\"))\n \n ax2 = plt.subplot(412,sharex=ax1) \n ax2.plot_date(data.index,data['vt'],'-k',label='V',linewidth=0.7)\n \n\n #plot vertical lines\n ax2.plot_date([start,start],[0,3000],'-k',label = 'true event',linewidth=1) \n ax2.plot_date([end,end],[0,3000],'-k',linewidth=1) \n ax2.plot_date([predstart,predstart],[0,3000],'-r',label = 'predicted event',linewidth=1) \n ax2.plot_date([predend,predend],[0,3000],'-r',linewidth=1) \n\n\n plt.ylabel('V [km/s]')\n \n #check plasma data exists\n if np.isnan(np.nanmin(data['vt']))==False:\n ax2.set_ylim(np.nanmin(data['vt'])-20,np.nanmax(data['vt'])+100) \n \n \n plt.setp(ax2.get_xticklabels(), visible=False)\n\n\n ax3 = plt.subplot(413,sharex=ax1) \n ax3.plot_date(data.index,data['np'],'-k',label='Np',linewidth=0.7)\n \n #plot vertical lines\n ax3.plot_date([start,start],[0,1000],'-k',label = 'true event',linewidth=1) \n ax3.plot_date([end,end],[0,1000],'-k',linewidth=1) \n ax3.plot_date([predstart,predstart],[0,1000],'-r',label = 'predicted event',linewidth=1) \n ax3.plot_date([predend,predend],[0,1000],'-r',linewidth=1) \n\n plt.ylabel('N [ccm-3]')\n \n if np.isnan(np.nanmin(data['np']))==False:\n ax3.set_ylim(0,np.nanmax(data['np'])+10) \n \n \n plt.setp(ax3.get_xticklabels(), visible=False)\n\n\n ax4 = plt.subplot(414,sharex=ax1) \n ax4.plot_date(data.index,data['tp']/1e6,'-k',label='Tp',linewidth=0.7)\n \n #plot vertical lines\n ax4.plot_date([start,start],[0,10],'-k',label = 'true event',linewidth=1) \n ax4.plot_date([end,end],[0,10],'-k',linewidth=1) \n ax4.plot_date([predstart,predstart],[0,10],'-r',label = 'predicted event',linewidth=1) \n ax4.plot_date([predend,predend],[0,10],'-r',linewidth=1) \n\n\n plt.ylabel('T [MK]')\n \n if np.isnan(np.nanmin(data['tp']))==False:\n ax4.set_ylim(0,np.nanmax(data['tp']/1e6)+0.2) \n\n \n \n plt.tight_layout()\n plt.show()\n\n\n #plotfile=typ+'ICME'+' data, start: '+start.strftime(\"%Y-%b-%d %H:%M\")+' end: '+end.strftime(\"%Y-%b-%d %H:%M\")+'.png'\n \n #plt.savefig(plotfile)\n #print('saved as ',plotfile)"
] |
[
[
"numpy.nanmax",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"pandas.to_datetime",
"pandas.Series",
"matplotlib.pyplot.figure",
"numpy.nanmin",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
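A plain-datetime restatement of the overlap() and similarity() logic used by the Event class above (shared time divided by the union of two intervals); the two intervals are synthetic.

import datetime

def overlap(a_begin, a_end, b_begin, b_end):
    # time the two intervals share, never negative
    return max(min(a_end, b_end) - max(a_begin, b_begin), datetime.timedelta(0))

e1 = (datetime.datetime(2021, 1, 1, 0), datetime.datetime(2021, 1, 1, 12))
e2 = (datetime.datetime(2021, 1, 1, 6), datetime.datetime(2021, 1, 1, 18))

inter = overlap(*e1, *e2)
union = (e1[1] - e1[0]) + (e2[1] - e2[0]) - inter
print(inter, inter / union)                  # 6:00:00 0.333... (Jaccard-style score)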
sarang-apps/darshan_browser
|
[
"173649bb8a7c656dc60784d19e7bb73e07c20daa"
] |
[
"tools/android/native_lib_memory/process_residency.py"
] |
[
"#!/usr/bin/python\n# Copyright 2017 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"From a native code residency dump created by log_residency.cc, generate a\nvisual timeline, and serialize the parsed data to JSON.\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nfrom matplotlib import collections as mc\nfrom matplotlib import pylab as plt\nimport numpy as np\n\n\ndef CreateArgumentParser():\n \"\"\"Creates and returns an argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n description='Reads and shows native library residency data.')\n parser.add_argument('--dump', type=str, required=True, help='Residency dump')\n parser.add_argument('--output', type=str, required=True,\n help='Output filename in text format')\n parser.add_argument('--json', type=str, help='Output filename in JSON output')\n return parser\n\n\ndef ParseDump(filename):\n \"\"\"Parses a residency dump, as generated from orderfile_instrumentation.cc.\n\n Args:\n filename: (str) dump filename.\n\n Returns:\n {\"start\": offset, \"end\": offset,\n \"residency\": {timestamp (int): data ([bool])}}\n \"\"\"\n result = {}\n with open(filename, 'r') as f:\n (start, end) = f.readline().strip().split(' ')\n result = {'start': int(start), 'end': int(end), 'residency': {}}\n for line in f:\n line = line.strip()\n timestamp, data = line.split(' ')\n data_array = [x == '1' for x in data]\n result['residency'][int(timestamp)] = data_array\n return result\n\n\ndef WriteJsonOutput(data, filename):\n \"\"\"Serializes the parsed data to JSON.\n\n Args:\n data: (dict) As returned by ParseDump()\n filename: (str) output filename.\n\n JSON format:\n {'offset': int, 'data': {\n relative_timestamp: [{'page_offset': int, 'resident': bool}]}}\n\n Where:\n - offset is the code start offset into its page\n - relative_timestamp is the offset in ns since the first measurement\n - page_offset is the page offset in bytes\n \"\"\"\n result = {'offset': data['start'], 'data': {}}\n start_timestamp = min(data['residency'].keys())\n for timestamp in data['residency']:\n adjusted_timestamp = timestamp - start_timestamp\n result[adjusted_timestamp] = []\n residency = data['residency'][timestamp]\n for (index, resident) in enumerate(residency):\n result[adjusted_timestamp].append(\n {'offset': index * (1 << 12), 'resident': resident})\n with open(filename, 'w') as f:\n json.dump(result, f)\n\n\ndef PlotResidency(data, output_filename):\n \"\"\"Creates a graph of residency.\n\n Args:\n data: (dict) As returned by ParseDump().\n output_filename: (str) Output filename.\n \"\"\"\n residency = data['residency']\n max_percentage = max((100. * sum(d)) / len(d) for d in residency.values())\n logging.info('Max residency = %.2f%%', max_percentage)\n\n start = data['start']\n end = data['end']\n _, ax = plt.subplots(figsize=(20, 10))\n timestamps = sorted(residency.keys())\n x_max = len(residency.values()[0]) * 4096\n for t in timestamps:\n offset_ms = (t - timestamps[0]) / 1e6\n incore = [i * 4096 for (i, x) in enumerate(residency[t]) if x]\n outcore = [i * 4096 for (i, x) in enumerate(residency[t]) if not x]\n percentage = 100. 
* len(incore) / (len(incore) + len(outcore))\n plt.text(x_max, offset_ms, '%.1f%%' % percentage)\n for (d, color) in ((incore, (.2, .6, .05, 1)), (outcore, (1, 0, 0, 1))):\n segments = [[(x, offset_ms), (x + 4096, offset_ms)] for x in d]\n colors = np.array([color] * len(segments))\n lc = mc.LineCollection(segments, colors=colors, linewidths=8)\n ax.add_collection(lc)\n\n plt.axvline(start)\n plt.axvline(end)\n plt.title('Code residency vs time since startup.')\n plt.xlabel('Code page offset (bytes)')\n plt.ylabel('Time since startup (ms)')\n plt.ylim(0, ymax=(timestamps[-1] - timestamps[0]) / 1e6)\n plt.xlim(xmin=0, xmax=x_max)\n plt.savefig(output_filename, bbox_inches='tight', dpi=300)\n\n\ndef main():\n parser = CreateArgumentParser()\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n logging.info('Parsing the data')\n data = ParseDump(args.dump)\n if args.json:\n WriteJsonOutput(data, args.json)\n logging.info('Plotting the results')\n PlotResidency(data, args.output)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pylab.xlim",
"matplotlib.collections.LineCollection",
"matplotlib.pylab.text",
"matplotlib.pylab.title",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.subplots",
"matplotlib.pylab.ylim",
"matplotlib.pylab.savefig",
"matplotlib.pylab.axvline"
]
] |
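Note on the entry above: its `PlotResidency` draws every snapshot as a row of 4096-byte page segments via `matplotlib.collections.LineCollection`. The standalone sketch below shows that same pattern in isolation; the residency data and file name are made up for illustration and are not taken from the dump.

```python
# Minimal sketch of the LineCollection pattern used by PlotResidency above;
# the page indices here are invented for illustration.
import numpy as np
from matplotlib import collections as mc
from matplotlib import pylab as plt

resident_pages = [0, 1, 3]  # page indices assumed resident
segments = [[(i * 4096, 0.0), ((i + 1) * 4096, 0.0)] for i in resident_pages]
colors = np.array([(0.2, 0.6, 0.05, 1.0)] * len(segments))  # one RGBA per segment

_, ax = plt.subplots(figsize=(8, 2))
ax.add_collection(mc.LineCollection(segments, colors=colors, linewidths=8))
ax.set_xlim(0, 5 * 4096)
ax.set_ylim(-1, 1)
plt.savefig('residency_sketch.png', bbox_inches='tight')
```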
mbkumar/numpy
|
[
"0645461254a2110438b6df63ef193c1138c306ec"
] |
[
"numpy/typing/tests/data/pass/array_constructors.py"
] |
[
"from typing import List, Any\nimport numpy as np\n\nclass Index:\n def __index__(self) -> int:\n return 0\n\nclass SubClass(np.ndarray): ...\n\ni8 = np.int64(1)\n\nA = np.array([1])\nB = A.view(SubClass).copy()\nC = [1]\n\ndef func(i: int, j: int, **kwargs: Any) -> SubClass:\n return B\n\nnp.array(1, dtype=float)\nnp.array(1, copy=False)\nnp.array(1, order='F')\nnp.array(1, order=None)\nnp.array(1, subok=True)\nnp.array(1, ndmin=3)\nnp.array(1, str, copy=True, order='C', subok=False, ndmin=2)\n\nnp.asarray(A)\nnp.asarray(B)\nnp.asarray(C)\n\nnp.asanyarray(A)\nnp.asanyarray(B)\nnp.asanyarray(B, dtype=int)\nnp.asanyarray(C)\n\nnp.ascontiguousarray(A)\nnp.ascontiguousarray(B)\nnp.ascontiguousarray(C)\n\nnp.asfortranarray(A)\nnp.asfortranarray(B)\nnp.asfortranarray(C)\n\nnp.require(A)\nnp.require(B)\nnp.require(B, dtype=int)\nnp.require(B, requirements=None)\nnp.require(B, requirements=\"E\")\nnp.require(B, requirements=[\"ENSUREARRAY\"])\nnp.require(B, requirements={\"F\", \"E\"})\nnp.require(B, requirements=[\"C\", \"OWNDATA\"])\nnp.require(B, requirements=\"W\")\nnp.require(B, requirements=\"A\")\nnp.require(C)\n\nnp.linspace(0, 2)\nnp.linspace(0.5, [0, 1, 2])\nnp.linspace([0, 1, 2], 3)\nnp.linspace(0j, 2)\nnp.linspace(0, 2, num=10)\nnp.linspace(0, 2, endpoint=True)\nnp.linspace(0, 2, retstep=True)\nnp.linspace(0j, 2j, retstep=True)\nnp.linspace(0, 2, dtype=bool)\nnp.linspace([0, 1], [2, 3], axis=Index())\n\nnp.logspace(0, 2, base=2)\nnp.logspace(0, 2, base=2)\nnp.logspace(0, 2, base=[1j, 2j], num=2)\n\nnp.geomspace(1, 2)\n\nnp.zeros_like(A)\nnp.zeros_like(C)\nnp.zeros_like(B)\nnp.zeros_like(B, dtype=np.int64)\n\nnp.ones_like(A)\nnp.ones_like(C)\nnp.ones_like(B)\nnp.ones_like(B, dtype=np.int64)\n\nnp.empty_like(A)\nnp.empty_like(C)\nnp.empty_like(B)\nnp.empty_like(B, dtype=np.int64)\n\nnp.full_like(A, i8)\nnp.full_like(C, i8)\nnp.full_like(B, i8)\nnp.full_like(B, i8, dtype=np.int64)\n\nnp.ones(1)\nnp.ones([1, 1, 1])\n\nnp.full(1, i8)\nnp.full([1, 1, 1], i8)\n\nnp.indices([1, 2, 3])\nnp.indices([1, 2, 3], sparse=True)\n\nnp.fromfunction(func, (3, 5))\n\nnp.identity(10)\n"
] |
[
[
"numpy.linspace",
"numpy.asarray",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.empty_like",
"numpy.full",
"numpy.asanyarray",
"numpy.fromfunction",
"numpy.ascontiguousarray",
"numpy.logspace",
"numpy.asfortranarray",
"numpy.full_like",
"numpy.int64",
"numpy.identity",
"numpy.require",
"numpy.array",
"numpy.indices",
"numpy.ones",
"numpy.geomspace"
]
] |
Whatsoever/SurfComp
|
[
"4887162fef765d0c84e2bb72f0c26974aef30aec"
] |
[
"src/Sorption_PB_functions/PB_coup_four_layer_2try_2v.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 3 12:42:20 2019\n\n@author: DaniJ\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 22 15:58:31 2019\n@author: DaniJ\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nfrom bvp import solve_bvp\nfrom scipy import linalg\n#from four_layer_model_2try_withFixSpeciesOption_Scaling import four_layer_model_2try_withFixSpeciesOption_Scaling as flm\nimport matplotlib.pyplot as plt\n'''\n In this first try we will assume that the vector of unknowns is composed in the following order:\n'''\n\n\ndef PB_and_fourlayermodel (T, X_guess, A, Z, log_k, idx_Aq, pos_psi_S1_vec, pos_psi_S2_vec, temp, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, x, idx_fix_species = None, zel=1, tolerance = 1e-6, max_iterations = 100,scalingRC = True):\n counter_iterations = 0\n abs_err = tolerance + 1\n if idx_fix_species != None:\n X_guess [idx_fix_species] = T [idx_fix_species]\n while abs_err>tolerance and counter_iterations < max_iterations:\n # Calculate Y\n [Y, T] = func_NR_FLM (X_guess, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, x, idx_fix_species)\n # Calculate Z\n J = Jacobian_NR_FLM (X_guess, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, x, idx_fix_species)\n # Scaling technique is the RC technique from \"Thermodynamic Equilibrium Solutions Through a Modified Newton Raphson Method\"-Marianna Marinoni, J\u0002er^ome Carrayrou, Yann Lucas, and Philippe Ackerer (2016)\n if scalingRC == True:\n D1 = diagonal_row(J)\n D2 = diagonal_col(J)\n \n J_new = np.matmul(D1,np.matmul(J, D2))\n Y_new = np.matmul(D1, Y)\n delta_X_new = linalg.solve(J_new,-Y_new)\n delta_X = np.matmul(D2, delta_X_new)\n else:\n # Calculating the diff, Delta_X\n delta_X = linalg.solve(J,-Y)\n #print(delta_X))\n # Relaxation factor borrow from Craig M.Bethke to avoid negative values\n max_1 = 1\n max_2 =np.amax(-2*np.multiply(delta_X, 1/X_guess))\n Max_f = np.amax([max_1, max_2])\n Del_mul = 1/Max_f\n X_guess=X_guess + Del_mul*delta_X\n #print(X_guess)\n Xmod=X_guess.copy()\n for i in range(len(X_guess)):\n if X_guess[i]<=0:\n Xmod[i]=1\n log_C = log_k + np.matmul(A,np.log10(Xmod))\n # transf\n C = 10**(log_C)\n u = np.matmul(A.transpose(),C)\n \n # Vector_error \n d = u-T\n print(d)\n if idx_fix_species != None:\n d[idx_fix_species] =0\n abs_err = max(abs(d))\n \n counter_iterations += 1\n if counter_iterations >= max_iterations:\n raise ValueError('Max number of iterations surpassed.') \n # Speciation - mass action law\n Xmod=X_guess.copy()\n for i in range(len(X_guess)):\n if X_guess[i]<=0:\n Xmod[i]=1\n log_C = log_k + np.matmul(A,np.log10(Xmod))\n # transf\n C = 10**(log_C)\n return X_guess, C\n\ndef func_NR_FLM (X, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, x, idx_fix_species=None):\n \"\"\"\n This function is supossed to be linked to the four_layer_two_surface_speciation function.\n It just gave the evaluated vector of Y, and T for the Newton-raphson procedure.\n The formulation of Westall (1980) is followed.\n FLM = four layer model\n \"\"\"\n # Speciation - mass action law\n Xmod=X.copy()\n for i in range(len(X)):\n if X[i]<=0:\n Xmod[i]=1\n log_C = log_k + np.matmul(A,np.log10(Xmod))\n # transf\n C = 10**(log_C)\n # Update T - \"Electrostatic parameters\"\n 'Notice that the last term of the psi_S1/2_v is not transformed from the boltzmann 
factor to the electrostatic potential!!!!!!!!!!!!'\n psi_S1_v = [Boltzman_factor_2_psi(X[pos_psi_S1_vec[0]], temp), Boltzman_factor_2_psi(X[pos_psi_S1_vec[1]], temp), Boltzman_factor_2_psi(X[pos_psi_S1_vec[2]], temp), X[pos_psi_S1_vec[3]]]\n psi_S2_v = [Boltzman_factor_2_psi(X[pos_psi_S2_vec[0]], temp), Boltzman_factor_2_psi(X[pos_psi_S2_vec[1]], temp), Boltzman_factor_2_psi(X[pos_psi_S2_vec[2]], temp), X[pos_psi_S2_vec[3]]] \n C_aq = C[idx_Aq]\n #\n T = Update_T_FLM(T, sS1, sS2, e, temp, aS1, aS2, Z,CapacitancesS1, CapacitancesS2, psi_S1_v, psi_S2_v, zel, pos_psi_S1_vec, pos_psi_S2_vec, C_aq, x)\n \n # Calculation of Y\n Y= np.matmul(A.transpose(),C)-T\n return Y, T\n\n\ndef Update_T_FLM(T, sS1, sS2, e, temp, aS1, aS2, Z,CapacitancesS1, CapacitancesS2, psi_S1_v, psi_S2_v, zel, pos_psi_S1_vec, pos_psi_S2_vec, C_aq, x):\n \n # constant\n F = 96485.3328959 # C/mol\n R = 8.314472 # J/(K*mol)\n eo = 8.854187871e-12 # Farrads = F/m - permittivity in vaccuum\n #e = 1.602176620898e-19 # C\n kb = 1.38064852e-23 # J/K other units --> kb=8,6173303e-5 eV/K\n Na = 6.022140857e23 # 1/mol\n elec_charge = 1.60217662e-19 #electron charge in C\n ########## S1 #####################\n sigma_S1_0 = CapacitancesS1[0]*(psi_S1_v[0]-psi_S1_v[1])\n sigma_S1_alpha = -sigma_S1_0 + CapacitancesS1[1]*(psi_S1_v[1]-psi_S1_v[2])\n sigma_S1_beta = -sigma_S1_0-sigma_S1_alpha+CapacitancesS1[2]*(psi_S1_v[2]-psi_S1_v[3])\n sigma_S1_gamma = -sigma_S1_0 - sigma_S1_alpha - sigma_S1_beta\n ########## S2 #####################\n sigma_S2_0 = CapacitancesS2[0]*(psi_S2_v[0]-psi_S2_v[1])\n sigma_S2_alpha = -sigma_S2_0 + CapacitancesS2[1]*(psi_S2_v[1]-psi_S2_v[2])\n sigma_S2_beta = -sigma_S2_0-sigma_S2_alpha+CapacitancesS2[2]*(psi_S2_v[2]-psi_S2_v[3])\n sigma_S2_gamma = -sigma_S2_0 - sigma_S2_alpha - sigma_S2_beta\n ########## T S1 #####################\n T_S1_0 = ((sS1*aS1)/F)*sigma_S1_0; # units mol/L or mol/kg\n T_S1_alpha = ((sS1*aS1)/F)*sigma_S1_alpha; # units mol/L or mol/kg\n T_S1_beta = ((sS1*aS1)/F)*sigma_S1_beta; # units mol/L or mol/kg\n ########## T S2 #####################\n T_S2_0 = ((sS2*aS2)/F)*sigma_S2_0; # units mol/L or mol/kg\n T_S2_alpha = ((sS2*aS2)/F)*sigma_S2_alpha; # units mol/L or mol/kg\n T_S2_beta = ((sS2*aS2)/F)*sigma_S2_beta; # units mol/L or mol/kg\n \n ################## PB part starts heres ########################################################################\n ew = eo*e\n Q = Z*elec_charge # Q is the charve of the aqueous elements times the electron charge \n C = C_aq\n A =Na* 1000/ew # a prefactor = Avogadro * 1000 /ew\n kbt = 1.38064852e-23 *temp # kb (J/K) * T in K\n y0 = np.zeros((2, x.size))\n y0[0,0] = psi_S1_v[3]\n 'I think that y0[1,0] and y0[1,-1] are not necessary to solve the problem, I would say that its values do not have implications. Although I am not 100% sure.'\n y0[1,0] = -sigma_S1_gamma/ew # The negative value that I am given here is extremely arbitrary I am not sure why. 
IT MUST BE DISCUSSED\n # y0[1,0] = dpsi_d dpsi_d = -(sig_0 + sig_b + sig_d)/ew # electric field at diffuse layer, x>d\n y0[0,-1]= psi_S2_v[3]\n y0[1,-1]= sigma_S2_gamma/ew\n args=[Q,C,A,kbt,y0]\n result = solve_bvp(fun_PB, bc_PB, x, y0, tol = 1e-4, args = args)\n #\n #plt.figure(1)\n #plt.plot(result.x, result.y[0])\n #plt.figure(2)\n #plt.plot(result.x, result.y[1])\n #assert 1==-1\n sigma_S1_d=-result.y[1][0]*ew\n sigma_S2_d=result.y[1][-1]*ew\n #\n T_S1_gammad = sigma_S1_gamma+sigma_S1_d\n T_S2_gammad = sigma_S2_gamma+sigma_S2_d\n # Now the values must be put in T\n T[pos_psi_S1_vec[0]] = T_S1_0\n T[pos_psi_S1_vec[1]] = T_S1_alpha\n T[pos_psi_S1_vec[2]] = T_S1_beta\n T[pos_psi_S1_vec[3]] = T_S1_gammad\n \n T[pos_psi_S2_vec[0]] = T_S2_0\n T[pos_psi_S2_vec[1]] = T_S2_alpha\n T[pos_psi_S2_vec[2]] = T_S2_beta\n T[pos_psi_S2_vec[3]] = T_S2_gammad\n \n return T\n\ndef Boltzman_factor_2_psi (x,temp):\n '''\n Transforms the equation from Xb = exp(-psi*F/RT) to psi = -ln(Xb)RT/F\n from Boltzman factor to electrostatic potential\n The units of \"temp\" (short for temperature) should be Kelvin\n '''\n R = 8.314472 # J/(K*mol)\n F = 96485.3328959 # C/mol \n D = R*temp\n psi = - np.log(x)*(D/F)\n return psi \n\n\n\ndef Jacobian_NR_FLM (X, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Zi, zel, pos_psi_S1_vec, pos_psi_S2_vec, x, idx_fix_species=None):\n '''\n This function should give the Jacobian. Here The jacobian is calculated as Westall (1980), except the electrostatic terms that are slightly different.\n The reason is because there seems to be some typos in Westall paper.\n Also, if idx_fix_species is given then the rows of the unknown will be 1 for the unknown and 0 for the other points.\n '''\n # constant\n F = 96485.3328959 # C/mol [Faraday constant]\n R = 8.314472 # J/(K*mol) [universal constant gas]\n eo = 8.854187871e-12 # Farrads = F/m - permittivity in vaccuum\n elec_charge = 1.60217662e-19 #electron charge in C\n # Speciation - mass action law\n #log_C = log_k + A*np.log10(X)\n Xmod=X.copy()\n for i in range(len(X)):\n if X[i]<=0:\n Xmod[i]=1\n log_C = log_k + np.matmul(A,np.log10(Xmod))\n # transf\n C = 10**(log_C)\n C_aq = C[idx_Aq]\n #I = Calculate_ionic_strength(Z, C_aq)\n # instantiate Jacobian\n length_X = X.size\n Z = np.zeros((length_X,length_X))\n # First part is the common of the Jacbian derivation\n for i in range(0, length_X):\n for j in range(0, length_X):\n Z[i,j]= np.matmul(np.multiply(A[:,i], A[:,j]), (C/X[j]))\n # Now the electrostatic part must be modified, one question hang on the air:\n # Should we check that the electrostatic part is as we expected?\n ############S1#######################\n sa_F2S1 = (sS1*aS1)/(F*F)\n C1_sa_F2_RTS1 = sa_F2S1*CapacitancesS1[0]*R*temp\n # Assigning in Jacobian (plane 0)\n Z[pos_psi_S1_vec[0],pos_psi_S1_vec[0]]=Z[pos_psi_S1_vec[0],pos_psi_S1_vec[0]] + C1_sa_F2_RTS1/X[pos_psi_S1_vec[0]]\n Z[pos_psi_S1_vec[0],pos_psi_S1_vec[1]]=Z[pos_psi_S1_vec[0],pos_psi_S1_vec[1]] - C1_sa_F2_RTS1/X[pos_psi_S1_vec[1]]\n #### plane alpha\n C1C2_sa_F2_RTS1 = sa_F2S1*R*temp*(CapacitancesS1[0]+CapacitancesS1[1])\n C2_sa_F2_RTS1 = sa_F2S1*CapacitancesS1[1]*R*temp\n # Assigning in Jacobian (plane alpha)\n Z[pos_psi_S1_vec[1],pos_psi_S1_vec[0]]=Z[pos_psi_S1_vec[1],pos_psi_S1_vec[0]] - C1_sa_F2_RTS1/X[pos_psi_S1_vec[0]]\n Z[pos_psi_S1_vec[1],pos_psi_S1_vec[1]]=Z[pos_psi_S1_vec[1],pos_psi_S1_vec[1]] + C1C2_sa_F2_RTS1/X[pos_psi_S1_vec[1]]\n Z[pos_psi_S1_vec[1],pos_psi_S1_vec[2]]= Z[pos_psi_S1_vec[1],pos_psi_S1_vec[2]] - 
C2_sa_F2_RTS1/X[pos_psi_S1_vec[2]]\n #### plane beta\n C3C2_sa_F2_RTS1 = sa_F2S1*R*temp*(CapacitancesS1[1]+CapacitancesS1[2])\n C3_sa_F2_RTS1 = sa_F2S1*CapacitancesS1[2]*R*temp\n # Assigning in Jacobian (plane beta)\n Z[pos_psi_S1_vec[2],pos_psi_S1_vec[1]] = Z[pos_psi_S1_vec[2],pos_psi_S1_vec[1]] - C2_sa_F2_RTS1/X[pos_psi_S1_vec[1]]\n Z[pos_psi_S1_vec[2], pos_psi_S1_vec[2]] = Z[pos_psi_S1_vec[2],pos_psi_S1_vec[2]] + C3C2_sa_F2_RTS1/X[pos_psi_S1_vec[2]]\n Z[pos_psi_S1_vec[2], pos_psi_S1_vec[3]] = Z[pos_psi_S1_vec[2],pos_psi_S1_vec[3]] - CapacitancesS1[2]*((sS1*aS1)/F)\n #Z[pos_psi_S1_vec[2], pos_psi_S1_vec[3]] = Z[pos_psi_S1_vec[2],pos_psi_S1_vec[3]] - C3_sa_F2_RTS1/X[pos_psi_S1_vec[3]]\n #### plane gamma [diffusive plane]\n #Z[pos_psi_S1_vec[3],pos_psi_S1_vec[2]] = Z[pos_psi_S1_vec[3],pos_psi_S1_vec[2]] - C3_sa_F2_RTS1/X[pos_psi_S1_vec[2]] \n # d_d plane\n #psi_d = Boltzman_factor_2_psi(X[pos_psi_S1_vec[3]], temp)\n #DY_Dpsid = -np.sqrt(8*1000*R*temp*e*eo*I)*np.cosh((zel*F*psi_d)/(2*R*temp))*((zel*F)/(2*R*temp)) - CapacitancesS1[2]\n #Dpsid_DpsidB = (-R*temp)/(F*X[pos_psi_S1_vec[3]])\n #Z[pos_psi_S1_vec[3], pos_psi_S1_vec[3]] = Z[pos_psi_S1_vec[3], pos_psi_S1_vec[3]] + (DY_Dpsid*Dpsid_DpsidB*((sS1*aS1)/F))\n\n#(Problably S1 and S2 can be enclosed in a for loop, reducing lines of code. If I have time and will, I will look at it.)\n ############S1#######################\n sa_F2S2 = (sS2*aS2)/(F*F)\n C1_sa_F2_RTS2 = sa_F2S2*CapacitancesS2[0]*R*temp\n # Assigning in Jacobian (plane 0)\n Z[pos_psi_S2_vec[0],pos_psi_S2_vec[0]]=Z[pos_psi_S2_vec[0],pos_psi_S2_vec[0]] + C1_sa_F2_RTS2/X[pos_psi_S2_vec[0]]\n Z[pos_psi_S2_vec[0],pos_psi_S2_vec[1]]=Z[pos_psi_S2_vec[0],pos_psi_S2_vec[1]] - C1_sa_F2_RTS2/X[pos_psi_S2_vec[1]]\n #### plane alpha\n C1C2_sa_F2_RTS2 = sa_F2S2*R*temp*(CapacitancesS2[0]+CapacitancesS2[1])\n C2_sa_F2_RTS2 = sa_F2S2*CapacitancesS2[1]*R*temp\n # Assigning in Jacobian (plane alpha)\n Z[pos_psi_S2_vec[1],pos_psi_S2_vec[0]]=Z[pos_psi_S2_vec[1],pos_psi_S2_vec[0]] - C1_sa_F2_RTS2/X[pos_psi_S2_vec[0]]\n Z[pos_psi_S2_vec[1],pos_psi_S2_vec[1]]=Z[pos_psi_S2_vec[1],pos_psi_S2_vec[1]] + C1C2_sa_F2_RTS2/X[pos_psi_S2_vec[1]]\n Z[pos_psi_S2_vec[1],pos_psi_S2_vec[2]]= Z[pos_psi_S2_vec[1],pos_psi_S2_vec[2]] - C2_sa_F2_RTS2/X[pos_psi_S2_vec[2]]\n #### plane beta\n C3C2_sa_F2_RTS2 = sa_F2S2*R*temp*(CapacitancesS2[1]+CapacitancesS2[2])\n C3_sa_F2_RTS2 = sa_F2S2*CapacitancesS2[2]*R*temp\n # Assigning in Jacobian (plane beta)\n Z[pos_psi_S2_vec[2],pos_psi_S2_vec[1]] = Z[pos_psi_S2_vec[2],pos_psi_S2_vec[1]] - C2_sa_F2_RTS2/X[pos_psi_S2_vec[1]]\n Z[pos_psi_S2_vec[2], pos_psi_S2_vec[2]] = Z[pos_psi_S2_vec[2],pos_psi_S2_vec[2]] + C3C2_sa_F2_RTS2/X[pos_psi_S2_vec[2]]\n Z[pos_psi_S2_vec[2], pos_psi_S2_vec[3]] = Z[pos_psi_S2_vec[2],pos_psi_S2_vec[3]] - CapacitancesS2[2]*((sS2*aS2)/F)\n #Z[pos_psi_S2_vec[2], pos_psi_S2_vec[3]] = Z[pos_psi_S2_vec[2],pos_psi_S2_vec[3]] - C3_sa_F2_RTS2/X[pos_psi_S2_vec[3]]\n #### plane gamma [diffusive plane]\n #Z[pos_psi_S2_vec[3],pos_psi_S2_vec[2]] = Z[pos_psi_S2_vec[3],pos_psi_S2_vec[2]] - C3_sa_F2_RTS2/X[pos_psi_S2_vec[2]] \n # d_d plane\n #psi_dS2 = Boltzman_factor_2_psi(X[pos_psi_S2_vec[3]], temp)\n #DY_Dpsid = -np.sqrt(8*1000*R*temp*e*eo*I)*np.cosh((zel*F*psi_dS2)/(2*R*temp))*((zel*F)/(2*R*temp)) - CapacitancesS2[2]\n #Dpsid_DpsidB = (-R*temp)/(F*X[pos_psi_S2_vec[3]])\n #Z[pos_psi_S2_vec[3], pos_psi_S2_vec[3]] = Z[pos_psi_S2_vec[3], pos_psi_S2_vec[3]] + (DY_Dpsid*Dpsid_DpsidB*((sS2*aS2)/F))\n \n \n #### plane gamma [diffusive plane]\n dpsiA_dXpsiA = 
(-R*temp)/(F*X[pos_psi_S2_vec[2]])\n Z[pos_psi_S1_vec[3],pos_psi_S1_vec[2]] =Z[pos_psi_S1_vec[3],pos_psi_S1_vec[2]] - CapacitancesS1[2]*dpsiA_dXpsiA\n Z[pos_psi_S2_vec[3],pos_psi_S2_vec[2]] =Z[pos_psi_S2_vec[3],pos_psi_S2_vec[2]] - CapacitancesS2[2]*dpsiA_dXpsiA\n #\n ew = eo*e\n Q = Zi*elec_charge # Q is the charve of the aqueous elements times the electron charge \n Cb = C_aq\n A =6.02214e23 * 1000/ew # a prefactor = Avogadro * 1000 /ew\n kbt = 1.38064852e-23 *temp # kb (J/K) * T in K\n delta_psi = 0.001\n \n y0 = np.zeros((2, x.size))\n y0[0,0] = X[pos_psi_S1_vec[3]]\n 'I think that y0[1,0] and y0[1,-1] are not necessary to solve the problem, I would say that its values do not have implications. Although I am not 100% sure.'\n y0[1,0] = (CapacitancesS1[2]*(X[pos_psi_S1_vec[3]]-Boltzman_factor_2_psi(X[pos_psi_S1_vec[2]], temp)))/ew # The negative value that I am given here is extremely arbitrary I am not sure why. IT MUST BE DISCUSSED\n # y0[1,0] = dpsi_d dpsi_d = -(sig_0 + sig_b + sig_d)/ew # electric field at diffuse layer, x>d \n y0[0,-1]= X[pos_psi_S2_vec[3]]\n y0[1,-1]= -(CapacitancesS2[2]*(X[pos_psi_S2_vec[3]]-Boltzman_factor_2_psi(X[pos_psi_S2_vec[2]], temp)))/ew\n \n y1 = np.zeros((2, x.size))\n y1[0,0] = X[pos_psi_S1_vec[3]]+delta_psi\n y1[1,0] = (CapacitancesS1[2]*((X[pos_psi_S1_vec[3]]+delta_psi)-Boltzman_factor_2_psi(X[pos_psi_S1_vec[2]], temp)))/ew\n y1[0,-1]= X[pos_psi_S2_vec[3]]\n y1[1,-1]= -(CapacitancesS2[2]*(X[pos_psi_S2_vec[3]]-Boltzman_factor_2_psi(X[pos_psi_S2_vec[2]], temp)))/ew\n \n y2 = np.zeros((2, x.size))\n y2[0,0] = X[pos_psi_S1_vec[3]]\n y2[1,0] = (CapacitancesS1[2]*((X[pos_psi_S1_vec[3]])-Boltzman_factor_2_psi(X[pos_psi_S1_vec[2]], temp)))/ew\n y2[0,-1]= X[pos_psi_S2_vec[3]]+delta_psi\n y2[1,-1]= -(CapacitancesS2[2]*((X[pos_psi_S2_vec[3]]+delta_psi)-Boltzman_factor_2_psi(X[pos_psi_S2_vec[2]], temp)))/ew\n \n args0=[Q,Cb,A,kbt,y0]\n args1=[Q,Cb,A,kbt,y1]\n args2=[Q,Cb,A,kbt,y2]\n \n #PB solving \n result0 = solve_bvp(fun_PB, bc_PB, x, y0, tol = 1e-4, args = args0)\n result1 = solve_bvp(fun_PB, bc_PB, x, y1, tol = 1e-4, args = args1)\n result2 = solve_bvp(fun_PB, bc_PB, x, y2, tol = 1e-4, args = args2)\n \n d_sigma_d_psi_S1= -(ew/delta_psi)*(result1.y[1,0]-result0.y[1, 0])\n d_sigma_d_psi_S2= (ew/delta_psi)*(result2.y[1,-1]-result0.y[1, -1])\n \n Z[pos_psi_S1_vec[3], pos_psi_S1_vec[3]] = CapacitancesS1[2] + d_sigma_d_psi_S1\n Z[pos_psi_S2_vec[3], pos_psi_S2_vec[3]] = CapacitancesS2[2] + d_sigma_d_psi_S2\n \n\n # finally just return Z\n if idx_fix_species != None:\n for d in idx_fix_species:\n v=np.zeros(length_X)\n v[d]=1\n Z[d,:] = v\n return Z\n \ndef fun_PB(x, y, args):\n Q = args[0]\n C = args[1]\n A = args[2]\n kbt = args[3]\n arg1 = np.zeros((x.size))\n for i in range(len(Q)):\n arg1 += Q[i]*C[i]*np.exp(-Q[i]*y[0]/kbt)\n arg1 = -A*arg1\n return np.vstack((y[1] , arg1))\n\ndef bc_PB(ya, yb, args):\n y0 = args[4]\n return np.array([ya[0]-y0[0,0] , yb[0]-y0[0,-1]])\n\n \ndef diagonal_row(J):\n num_rows = J.shape[0]\n D = np.zeros((num_rows,num_rows))\n for i in range(0,num_rows):\n D[i,i]=np.sqrt(linalg.norm(J[i,:], np.inf))\n return D\ndef diagonal_col(J):\n num_cols = J.shape[1]\n D = np.zeros((num_cols,num_cols))\n for i in range(0,num_cols):\n D[i,i]=np.sqrt(linalg.norm(J[:,i], np.inf))\n return D"
] |
[
[
"numpy.amax",
"numpy.log",
"numpy.multiply",
"numpy.matmul",
"scipy.linalg.solve",
"numpy.log10",
"scipy.linalg.norm",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
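Note on the entry above: before each Newton-Raphson solve, `PB_coup_four_layer_2try_2v.py` rescales the Jacobian with the diagonal matrices built by its `diagonal_row`/`diagonal_col` helpers (the RC technique it attributes to Marinoni et al., 2016). Below is a hedged, self-contained sketch of one such scaled step; the reciprocal-square-root convention and the zero-norm guard are my assumptions, not a claim about the file's exact scaling.

```python
# Illustrative two-sided diagonal equilibration for one Newton step
# (J: Jacobian, Y: residual). Not lifted from the file above.
import numpy as np
from scipy import linalg

def scaled_newton_step(J, Y):
    row_norms = np.maximum(np.abs(J).max(axis=1), 1e-300)  # infinity norms of rows
    col_norms = np.maximum(np.abs(J).max(axis=0), 1e-300)  # infinity norms of columns
    D1 = np.diag(1.0 / np.sqrt(row_norms))
    D2 = np.diag(1.0 / np.sqrt(col_norms))
    delta_scaled = linalg.solve(D1 @ J @ D2, -(D1 @ Y))
    return D2 @ delta_scaled  # update expressed in the original, unscaled unknowns
```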
FizzerYu/CollaborativeVAE
|
[
"4714cce49acba258600b1b5bbcd3a1a4762385e6"
] |
[
"lib/test.py"
] |
[
"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.autograd import Variable\n\n\nallowed_activations = ['sigmoid', 'tanh', 'softmax', 'relu', 'linear']\nallowed_noises = [None, 'gaussian', 'mask']\nallowed_losses = ['rmse', 'cross-entropy']\n\n\nclass run_all(nn.Module): # AE\n def __init__(self, in_dim, hidden_dim, z_dim, activation):\n super(run_all, self).__init__()\n # rec\n activation = activation\n self.encoder = nn.Sequential(nn.Linear(in_dim, hidden_dim[0]), activation[0],\n nn.Linear( hidden_dim[0], hidden_dim[1]), activation[1])\n self.fc_z_mean = nn.Linear(hidden_dim[1], z_dim)\n self.fc_z_log_sigma = nn.Linear(hidden_dim[1], z_dim)\n #gen\n self.decoder = nn.Sequential(nn.Linear(z_dim, hidden_dim[1]), activation[0],\n nn.Linear( hidden_dim[1], hidden_dim[0]), activation[1])\n self.fc_gen = nn.Linear(hidden_dim[0], in_dim)\n # self.weights_init()\n\n def forward(self, x): #[b, in_dim]\n x = self.encoder(x)\n z_mean = self.fc_z_mean(x) # mu\n z_log_sigma_sq = self.fc_z_log_sigma(x) # log_var\n z = self.reparameterize(z_mean, z_log_sigma_sq)\n x_recon = self.decoder(z)\n x_recon = self.fc_gen(x_recon)\n x_recon = nn.functional.softmax(x_recon)\n return x_recon, z_mean, z_log_sigma_sq\n\n # 随机生成隐含向量\n def reparameterize(self, mu, log_var):\n std = torch.sqrt(torch.clamp(torch.exp(log_var), min = 1e-10))\n eps = torch.randn_like(std)\n return mu + eps * std\n def weights_init():\n self.encoder[0] = torch.nn.Parameter()\n self.encoder[2] = torch.nn.Parameter()\n\n self.encoder[0] = torch.nn.Parameter()\n self.encoder[2] = torch.nn.Parameter()\n\n\n\n\n\n\nmodel = run_all(10, [100,200],20,[nn.ReLU(), nn.ReLU()])\nprint(model.encoder[2])\n"
] |
[
[
"torch.randn_like",
"torch.nn.functional.softmax",
"torch.nn.Parameter",
"torch.exp",
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
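Note on the entry above: `run_all.reparameterize` implements the usual VAE reparameterization trick with `std = sqrt(exp(log_var))`. The sketch below restates it in the equivalent `exp(0.5 * log_var)` form and adds the standard Gaussian KL term such an encoder is typically trained with; the KL helper is my addition, not part of `lib/test.py`.

```python
import torch

def reparameterize(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # z = mu + sigma * eps, eps ~ N(0, I); sqrt(exp(log_var)) == exp(0.5 * log_var)
    std = torch.exp(0.5 * log_var)
    return mu + torch.randn_like(std) * std

def gaussian_kl(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # KL(N(mu, sigma^2) || N(0, I)), summed over latent dims, averaged over the batch
    return (-0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).sum(dim=1)).mean()
```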
lnies/lEval
|
[
"da9ce344a713c7fb46d53417e44a2f56956a1b60"
] |
[
"piicr-analysis/python_plotter_functions.py"
] |
[
"# ---------------------------------------------------------------------------\n# Written by Jonas Karthein in 2016/2017. Questions to [email protected]\n# ---------------------------------------------------------------------------\n\nimport matplotlib as mpl\nmpl.use('Qt5Agg')\n\nimport matplotlib.pyplot as plt\nimport numpy\nfrom mpl_toolkits.mplot3d import axes3d\nimport mle\nimport pylab as P\nimport math\nimport datetime\nfrom matplotlib.patches import Ellipse\nfrom matplotlib.offsetbox import AnchoredText\nfrom matplotlib import rcParams, gridspec\nimport time\nimport sys\nimport os\n\n\ndef python_plot(x_y_yerr, title, file_name, show_plot, legend_text, fit, fit_function, font, color_fit, color_points, fit_range, x_fit_min, x_fit_max, fit_b_parameter_start, plot_type, bins, markersize, grid_on_off, png_on_off, chi_sq_in_plot, diff_to_AME, diff_to_AME_err):\n \"\"\"\n Function to plot (+ fit) a dataset.\n\n The sheet should consist of a header ('name / unit') and at least data\n for x and y. Possible formats are:\n - x,y\n - x,y,yerr\n\n Possible fit functions are:\n 'linear', 'gauss', 'x^2', 'exp', 'sin'\n \"\"\"\n timestamp_plot = (int(time.time()*100))\n xss = []\n xss_str = []\n yss = []\n yerr = []\n yss2 = []\n color = {'green': 'g', 'blue': 'b', 'yellow': 'y', 'black': 'k', 'red': 'r'}\n for j in range(1, len(x_y_yerr), 1):\n if type(x_y_yerr[1][0]) == str:\n if x_y_yerr[1][0].isdigit() == False:\n xss = [x for x in range(len(x_y_yerr)-1)]\n xss_str.append(str(x_y_yerr[j][0]))\n else:\n xss.append(x_y_yerr[j][0])\n\n # xss.append(datetime.datetime.strptime(str(x_y_yerr[j][0]), '%m/%d/%Y %H:%M:%S'))\n if plot_type == 'scatter' or plot_type == '2Dhistogram' or plot_type == '2dhistogram-mcp' or plot_type == 'polar' or plot_type == '2lines-no-error' or plot_type == 'mcp':\n yss.append(x_y_yerr[j][1])\n if len(x_y_yerr[0]) == 3 and not plot_type == '2lines-no-error':\n yerr.append(x_y_yerr[j][2])\n elif len(x_y_yerr[0]) == 3 and plot_type == '2lines-no-error':\n yss2.append(x_y_yerr[j][2])\n\n if plot_type == '2Dhistogram' or plot_type == 'mcp' or plot_type == '2dhistogram-mcp':\n plt.figure(timestamp_plot, figsize=(8, 8), facecolor='white')\n else:\n if type(x_y_yerr[1][0]) == str:\n if x_y_yerr[1][0].isdigit() == False:\n plt.figure(timestamp_plot, figsize=(18, 8), facecolor='white')\n else:\n plt.figure(timestamp_plot, figsize=(9, 6), facecolor='white')\n\n if font == 'Utopia':\n mpl.rc('font', family='serif', serif='Utopia') # Utopia LaTeX font!!\n mpl.rc('text', usetex=False)\n elif font == 'LaTeX':\n mpl.rc('font', family='serif') # Standard LaTeX font!!\n mpl.rc('text', usetex=True)\n\n if plot_type == 'scatter':\n range_x = max(xss) - min(xss)\n range_y = max(yss) - min(yss)\n if yerr == []:\n plt.plot(xss, yss, '%s o' % color[color_points], label='%s' % legend_text, markersize=markersize)\n else:\n plt.errorbar(xss, yss, yerr=yerr, fmt='%s o' % color[color_points], label='%s' % legend_text, markersize=markersize) # , range=[43, 58]\n plt.ylabel('%s' % x_y_yerr[0][1], fontsize=22)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n if type(xss[0]) == datetime.datetime:\n range_time = (max(xss)-min(xss)).total_seconds()\n if yerr == []:\n plt.axis([((min(xss) - datetime.timedelta(seconds=0.05*range_time))), ((max(xss) + datetime.timedelta(seconds=0.05*range_time))), min(yss) - 0.1 * range_y, max(yss) + 0.1 * range_y])\n else:\n plt.axis([((min(xss) - datetime.timedelta(seconds=0.05*range_time))), ((max(xss) + datetime.timedelta(seconds=0.05*range_time))), min(yss) - max(yerr) 
- 0.1 * range_y, max(yss) + max(yerr) + 0.1 * range_y])\n else:\n if yerr == []:\n plt.axis([min(xss) - 0.05 * range_x, max(xss) + 0.05 * range_x, min(yss) - 0.1 * range_y, max(yss) + 0.1 * range_y])\n else:\n plt.axis([min(xss) - 0.05 * range_x, max(xss) + 0.05 * range_x, min(yss) - max(yerr) - 0.1 * range_y, max(yss) + max(yerr) + 0.1 * range_y])\n if type(x_y_yerr[1][0]) == str:\n if x_y_yerr[1][0].isdigit() == False:\n plt.xticks(range(len(xss_str)), [str(i) for i in xss_str], rotation=80, size='small')\n plt.axis([min(xss) - 1, max(xss) + 1, min(yss) - max(yerr) - 0.1 * range_y, max(yss) + max(yerr) + 0.1 * range_y])\n elif plot_type == 'histogram':\n hist_x, xedges, patches = plt.hist(xss, bins=bins, histtype='step', stacked=True, fill=False)\n plt.ylabel('#', fontsize=22)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n if type(xss[1]) == 'float' or type(xss[1]) == 'int':\n range_x = max(xedges) - min(xedges)\n range_y = max(hist_x) - min(hist_x)\n plt.axis([min(xedges) - 0.05 * range_x, max(xedges) + 0.05 * range_x, 0, max(hist_x) + 0.05 * range_y])\n elif plot_type == '2Dhistogram' or plot_type == '2dhistogram-mcp':\n colors = [(1,1,1), (0.10196, 0.42745, 0), (0.1294, 0.50588,0), (0.22745, 0.8039, 0.1843), (0.87058, 1, 0), (0.9882, 0.996, 0.1961), (0.9686, 0.8, 0.19215), (0.9529, 0.5922, 0.0118), (0.9451, 0.3882, 0.0157), (0.9333, 0.0314, 0.0157), (0.6078, 0.0118, 0), (0.1882, 0, 0)] # easy to see background\n # colors = [(1,1,1), (0.76078, 0.9254, 0.7254), (0.10196, 0.42745, 0), (0.1294, 0.50588,0), (0.22745, 0.8039, 0.1843), (0.87058, 1, 0), (0.9882, 0.996, 0.1961), (0.9686, 0.8, 0.19215), (0.9529, 0.5922, 0.0118), (0.9451, 0.3882, 0.0157), (0.9333, 0.0314, 0.0157), (0.6078, 0.0118, 0), (0.1882, 0, 0)] # low background\n n_bins = 1000\n cmap_name = 'my_name'\n\n cm = mpl.colors.LinearSegmentedColormap.from_list(cmap_name, colors, N=n_bins, gamma=1.5)\n# no normalization:\n # counts, xedges, yedges, Image = plt.hist2d(xss, yss, bins=[bins/2.5, bins], cmap=cm)\n# no normalization but maximum number of counts per bin:\n # counts, xedges, yedges, Image = plt.hist2d(xss, yss, bins=[bins/2.5, bins], cmap=cm, cmax=15)\n# logarithmic normalization:\n counts, xedges, yedges, Image = plt.hist2d(xss, yss, bins=[bins/2.5, bins], cmap=cm, norm=mpl.colors.LogNorm(vmin=1, vmax=100))\n plt.ylabel('%s' % x_y_yerr[0][1], fontsize=22)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n range_x = max(xedges) - min(xedges)\n range_y = max(yedges) - min(yedges)\n plt.axis([min(xedges) - 0.05 * range_x, max(xedges) + 0.05 * range_x, min(yedges) - 0.05 * range_x, max(yedges) + 0.05 * range_x])\n elif plot_type == 'polar':\n plt.polar(xss, yss, 'g o', alpha=.15)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n plt.gca().set_rlim(0, 20)\n elif plot_type == '2lines-no-error':\n range_x = max(xss) - min(xss)\n range_y = max(yss) - min(yss2)\n plt.gca().set_xticklabels([])\n if yerr == []:\n plt.figure(timestamp_plot)\n plt.plot(xss, yss, '%s' % color[color_points[0]], label='%s' % legend_text[0], markersize=markersize)\n plt.figure(timestamp_plot)\n plt.plot(xss, yss2, '%s' % color[color_points[1]], label='%s' % legend_text[1], markersize=markersize)\n plt.ylabel('%s' % x_y_yerr[0][1], fontsize=22)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n if type(xss[0]) == datetime.datetime:\n range_time = (sum(int(x) * 60 ** i for i,x in enumerate(reversed(str(max(xss)-min(xss)).split(\":\")))))\n plt.axis([((min(xss) - datetime.timedelta(seconds=0.05*range_time))), ((max(xss) + 
datetime.timedelta(seconds=0.05*range_time))), min([min(yss), min(yss2)]) - 0.1 * range_y, max([max(yss), max(yss2)]) + 0.1 * range_y])\n else:\n plt.axis([min(xss) - 0.05 * range_x, max(xss) + 0.05 * range_x, min([min(yss), min(yss2)]) - 0.1 * range_y, max([max(yss), max(yss2)]) + 0.1 * range_y])\n plt.legend(fontsize=16)\n if plot_type == 'mcp':\n if markersize > 2:\n plt.plot(xss, yss, '%s o' % color[color_points], label='%s' % legend_text, markersize=markersize, alpha=0.08)\n else:\n plt.plot(xss, yss, '%s o' % color[color_points], label='%s' % legend_text, markersize=markersize)\n if (plot_type == 'mcp' or plot_type == '2dhistogram-mcp') and diff_to_AME_err != -99999:\n plt.ylabel('y / mm', fontsize=26)\n plt.xlabel('x / mm', fontsize=26)\n plt.xticks([-566.999995464,-377.999996976,-188.999998488,0,188.999998488,377.999996976,566.999995464], [u'\\u221218', u'\\u221212', u'\\u22126',0,6,12,18], fontsize=22)\n plt.yticks([-566.999995464,-377.999996976,-188.999998488,0,188.999998488,377.999996976,566.999995464], [u'\\u221218', u'\\u221212', u'\\u22126',0,6,12,18], fontsize=22)\n plt.axhline(y=0, linewidth=1, color = 'k')\n plt.axvline(x=0, linewidth=1, color='k')\n plt.text(-724.499994204, -724.499994204, '{} entries'.format(len(xss)))\n\n plt.axis([-750, 750, -750, 750])\n ax = plt.gca()\n mcp = Ellipse(xy=(0, 0), width=1440, height=1440, edgecolor='k', fc='None', lw=2)\n ax.add_patch(mcp)\n if diff_to_AME_err != -99999:\n if len(diff_to_AME_err) == 4:\n fwhm = Ellipse(xy=(diff_to_AME_err[0], diff_to_AME_err[2]), width=diff_to_AME[0]*2, height=diff_to_AME[2]*2, edgecolor='r', fc='None', lw=2)\n ax.add_patch(fwhm)\n anchored_text = AnchoredText('Fit X pos. = %3.2f (%3.2f)\\nFit Y pos. = %3.2f (%3.2f)\\nFit X FWHM = %3.2f (%3.2f)\\nFit Y FWHM = %3.2f (%3.2f)' %(float(diff_to_AME_err[0]), float(diff_to_AME_err[1]), float(diff_to_AME_err[2]), float(diff_to_AME_err[3]), float(diff_to_AME[0]), float(diff_to_AME[1]), float(diff_to_AME[2]), float(diff_to_AME[3])), loc=1)\n ax.add_artist(anchored_text)\n elif len(diff_to_AME_err) > 4:\n fwhm_1 = Ellipse(xy=(diff_to_AME_err[0], diff_to_AME_err[2]), width=diff_to_AME[0]*2, height=diff_to_AME[2]*2, edgecolor='r', fc='None', lw=2)\n ax.add_patch(fwhm_1)\n fwhm_2 = Ellipse(xy=(diff_to_AME_err[4], diff_to_AME_err[6]), width=diff_to_AME[0]*2, height=diff_to_AME[2]*2, edgecolor='r', fc='None', lw=2)\n ax.add_patch(fwhm_2)\n anchored_text = AnchoredText('Fit X pos. dom. = %3.2f (%3.2f)\\nFit Y pos. dom. = %3.2f (%3.2f)\\nFit X pos. rec. = %3.2f (%3.2f)\\nFit Y rec. dom. 
= %3.2f (%3.2f)\\nFit X FWHM = %3.2f (%3.2f)\\nFit Y FWHM = %3.2f (%3.2f)\\nStatus: %s' %(float(diff_to_AME_err[0]), float(diff_to_AME_err[1]), float(diff_to_AME_err[2]), float(diff_to_AME_err[3]), float(diff_to_AME_err[4]), float(diff_to_AME_err[5]), float(diff_to_AME_err[6]), float(diff_to_AME_err[7]),float(diff_to_AME[0]), float(diff_to_AME[1]), float(diff_to_AME[2]), float(diff_to_AME[3]), diff_to_AME_err[-1]), loc=1)\n ax.add_artist(anchored_text)\n\n if plot_type == '2dhistogram-mcp' and diff_to_AME_err == -99999:\n plt.ylabel('%s' % x_y_yerr[0][1], fontsize=22)\n plt.xlabel('%s' % x_y_yerr[0][0], fontsize=22)\n plt.xticks([-18,-12,-6,0,6,12,18], fontsize=22)\n plt.yticks([-18,-12,-6,0,6,12,18], fontsize=22)\n\n plt.axis([-23.81, 23.81, -23.81, 23.81])\n ax = plt.gca()\n mcp = Ellipse(xy=(0, 0), width=45.714, height=45.714, edgecolor='k', fc='None', lw=2)\n ax.add_patch(mcp)\n\n\n if title == '':\n pass\n elif plot_type == 'polar':\n plt.title('%s' % str(title), fontsize=26, y=1.03)\n else:\n plt.title('%s' % str(title), fontsize=26)\n if ('dominant' in title or 'recessive' in title) and 'vector' not in title:\n plt.text(0, min(yss)-max(yerr), '{}'.format(os.getcwd().split(os.sep)[-2])) # get folder name of run\n\n\n\n if fit == 'yes':\n x = mle.var('x', observed=True, vector=True)\n y = mle.var('y', observed=True, vector=True)\n\n a = mle.var('a')\n b = mle.var('b')\n c = mle.var('c')\n sigma = mle.var('sigma')\n\n\n if fit_function == 'gauss':\n model = mle.Normal(y, a * numpy.exp(-(x - b)**2.0 / (2 * c**2)), sigma)\n def func(x, a, b, c):\n return a * numpy.exp(-(x - b)**2.0 / (2 * c**2))\n elif fit_function == 'x^2':\n model = mle.Normal(y, a * (x**2.0) + b * x + c, sigma)\n def func(x, a, b, c):\n return a * (x**2.0) + b * x + c\n elif fit_function == 'x^4':\n model = mle.Normal(y, a * (x**2.0) + b * (x**4.0) + c, sigma)\n def func(x, a, b, c):\n return a * (x**2.0) + b * (x**4.0) + c\n elif fit_function == 'linear':\n model = mle.Normal(y, a * x + b + c, sigma)\n def func(x, a, b, c):\n return a * x + b + c\n elif fit_function == 'sin':\n model = mle.Normal(y, a * numpy.sin(b*x) + c, sigma)\n def func(x, a, b, c):\n return a * numpy.sin(b*x) + c\n elif fit_function == 'exp':\n model = mle.Normal(y, a * numpy.exp(b * x) + c, sigma)\n def func(x, a, b, c):\n return a * numpy.exp(b * x) + c\n elif fit_function == 'log':\n model = mle.Normal(y, a * numpy.log(b * x) + c, sigma)\n def func(x, a, b, c):\n return a * numpy.log(b * x) + c\n\n if fit_range == 'full':\n if plot_type == 'scatter':\n xs = numpy.array(xss)\n ys = numpy.array(yss)\n elif plot_type == 'histogram':\n xs_hilf = numpy.array(xedges)\n xs = xs_hilf[:-1]\n ys = numpy.array(hist_x)\n elif fit_range == 'partly':\n if plot_type == 'scatter':\n xs_list = []\n ys_list = []\n xs_hilf = numpy.array(xss)\n ys_hilf = numpy.array(yss)\n for kko in range(len(xss)):\n if xs_hilf[kko] >= x_fit_min and xs_hilf[kko] <= x_fit_max:\n xs_list.append(xs_hilf[kko])\n ys_list.append(ys_hilf[kko])\n xs = numpy.array(xs_list)\n ys = numpy.array(ys_list)\n elif plot_type == 'histogram':\n xs = numpy.array(xedges[int(x_fit_min*bins):int(x_fit_max*bins)])\n xs = xs_hilf[:-1]\n ys = numpy.array(hist_x)\n result_mlf = model.fit({'x': xs, 'y': ys}, {'a': 1, 'b': 1, 'c': 1, 'sigma': 1}) # FIT Startbedingungen\n parameter_mlf = []\n parameter_mlf.append(result_mlf.x['a'])\n parameter_mlf.append(result_mlf.x['b'])\n parameter_mlf.append(result_mlf.x['c'])\n parameter_mlf_err = []\n parameter_mlf_err = numpy.sqrt(numpy.diag(result_mlf['hess_inv'])) 
# one standard deviation error of parameters\n # print parameter_mlf, parameter_mlf_err\n\n if plot_type == 'scatter':\n hilfx = numpy.linspace(min(xss), max(xss), 10000)\n elif plot_type == 'histogram':\n hilfx = numpy.linspace(min(xedges[:bins]), max(xedges[:bins]), 1000)\n\n plt.figure(timestamp_plot, figsize=(8, 8), facecolor='white')\n plt.plot(hilfx, func(hilfx, *parameter_mlf), '%s-' % color[color_fit], linewidth=2, label='Max. likelihood fit')\n plt.tick_params(labelsize=16)\n\n if legend_text == '' or plot_type == '2lines-no-error':\n pass\n else:\n plt.legend(fontsize=16, numpoints=1)\n\n if fit_function == 'gauss':\n plt.text(min(xss), max(yss) - 0.2 * range_y, 'Max. likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f) \\nc = %3.2f (%3.2f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*exp\\\\left(\\\\frac{-(x - b)^2}{2 * c^2}\\\\right)$', fontsize=10)\n elif fit_function == 'x^2':\n plt.text(min(xss), max(yss) - 0.3 * range_y, 'Max. likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f) \\nc = %3.2f (%3.2f) \\nextremum = %3.2f' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3], (-parameter_mlf[1]/2/parameter_mlf[0])), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*x^2 + b*x + c$', fontsize=10)\n elif fit_function == 'x^4':\n plt.text(min(xss), max(yss) - 0.3 * range_y, 'Max. likelihood estimation: \\na = %3.8f (%3.8f) \\nb = %3.8f (%3.8f) \\nc = %3.8f (%3.8f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*x^2 + b*x^4 + c$', fontsize=10)\n elif fit_function == 'linear':\n plt.text(min(xss), max(yss) - 0.2 * range_y, 'Max. likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1] + parameter_mlf[2], parameter_mlf_err[2] + parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*x+b$', fontsize=10)\n elif fit_function == 'sin':\n plt.text(min(xss), max(yss) - 0.2 * range_y, 'Max. likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f) \\nc = %3.2f (%3.2f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*sin\\\\left(b*x\\\\right) + c$', fontsize=10)\n elif fit_function == 'exp':\n plt.text(min(xss), max(yss) - 0.2 * range_y, 'Max. likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f) \\nc = %3.2f (%3.2f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*exp\\\\left(b*x\\\\right) + c$', fontsize=10)\n elif fit_function == 'log':\n plt.text(min(xss), max(yss) - 0.2 * range_y, 'Max. 
likelihood estimation: \\na = %3.2f (%3.2f) \\nb = %3.2f (%3.2f) \\nc = %3.2f (%3.2f)' % (parameter_mlf[0], parameter_mlf_err[1], parameter_mlf[1], parameter_mlf_err[2], parameter_mlf[2], parameter_mlf_err[3]), fontsize=10)\n plt.text(min(xss), max(yss) - 0.05 * range_y, 'Fit-Funktion: \\n$a*log\\\\left(b*x\\\\right) + c$', fontsize=10)\n\n if grid_on_off == 'on':\n plt.grid(True)\n # gridlines = ax.get_xgridlines() + ax.get_ygridlines()\n # for line in gridlines:\n # line.set_linestyle('-.')\n # print chi_sq_in_plot\n if not chi_sq_in_plot == '' and not plot_type == 'mcp' and not plot_type == '2dhistogram-mcp':\n # print 'fucking here'\n # print chi_sq_in_plot[2]\n plt.tick_params(labelsize=16)\n plt.axhline(y=0, color='k', linestyle='-', linewidth=2)\n\n plt.axhline(y=diff_to_AME, color='r', linestyle='-')\n plt.axhline(y=diff_to_AME+diff_to_AME_err, color='r', linestyle='--', alpha=0.75)\n plt.axhline(y=diff_to_AME-diff_to_AME_err, color='r', linestyle='--', alpha=0.75)\n if chi_sq_in_plot[2] >= 0:\n plt.axhline(y=diff_to_AME-diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', linestyle='-.', alpha=0.65)\n plt.axhline(y=diff_to_AME+diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', linestyle='-.', alpha=0.65)\n ax = P.gca()\n anchored_text = AnchoredText('$\\Delta_{AME}$ = %3.2f ($\\pm$%3.2f) keV\\nAvg. $\\chi^2_{red}$ = %3.2f\\nAvg. unc. = %3.2f keV\\nt$_{acc}$ = %3.2f ms\\nMass dep. shift = %s\\nSys. unc. = %s' %(diff_to_AME, diff_to_AME_err, chi_sq_in_plot[0], numpy.mean(yerr), chi_sq_in_plot[1], chi_sq_in_plot[2], chi_sq_in_plot[3]), loc=1)\n else:\n ax = P.gca()\n # plt.axhline(y=diff_to_AME-diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', linestyle='-.', alpha=0.65)\n # plt.axhline(y=diff_to_AME+diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', linestyle='-.', alpha=0.65)\n anchored_text = AnchoredText('$\\Delta_{AME}$ = %3.2f ($\\pm$%3.2f) keV\\nAvg. $\\chi^2_{red}$ = %3.2f \\n%s' %(diff_to_AME, diff_to_AME_err, chi_sq_in_plot[0], chi_sq_in_plot[1]), loc=1)\n # print 'or here?!?'\n anchored_text.patch.set_boxstyle(\"round,pad=0.,rounding_size=0.2\")\n anchored_text.patch.set_alpha(0.8)\n ax.add_artist(anchored_text)\n\n if type(xss[0]) == datetime.datetime:\n range_time = (max(xss)-min(xss)).total_seconds()\n interval_time = [((min(xss) - datetime.timedelta(seconds=0.05*range_time))), ((max(xss) + datetime.timedelta(seconds=0.05*range_time)))]\n plt.fill_between(interval_time, y1=diff_to_AME+diff_to_AME_err, y2=diff_to_AME-diff_to_AME_err, color='r', alpha=0.1)\n plt.fill_between(interval_time, y1=diff_to_AME+diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), y2=diff_to_AME-diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', alpha=0.1)\n else:\n plt.fill_between([min(xss) - 0.05 * range_x, max(xss) + 0.05 * range_x], y1=diff_to_AME+diff_to_AME_err, y2=diff_to_AME-diff_to_AME_err, color='r', alpha=0.1)\n plt.fill_between([min(xss) - 0.05 * range_x, max(xss) + 0.05 * range_x], y1=diff_to_AME+diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), y2=diff_to_AME-diff_to_AME_err*math.sqrt(chi_sq_in_plot[0]), color='r', alpha=0.1)\n\n plt.tight_layout()\n\n plt.savefig('%s.pdf' % file_name)\n if png_on_off == 'on':\n plt.savefig('%s.PNG' % file_name)\n\n if show_plot == 'yes':\n plt.show()\n elif show_plot == 'no':\n plt.close()\n\n if fit == 'yes':\n return(xss, yss, yerr, parameter_mlf, parameter_mlf_err)\n elif plot_type == 'histogram':\n return(xss, yss, yerr, hist_x, xedges)\n else:\n return(xss, yss, yerr)\n"
] |
[
[
"numpy.diag",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.exp",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.fill_between",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.polar",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.rc",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"matplotlib.colors.LogNorm",
"matplotlib.use",
"matplotlib.pyplot.grid",
"matplotlib.offsetbox.AnchoredText",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tick_params"
]
] |
pauleckhardt/obsdata
|
[
"f65168694b76a7e70c56d50ef515d7d5a64f581d"
] |
[
"obsdata/indaaf_config.py"
] |
[
"import os\nimport pandas as pd\nfrom collections import namedtuple\n\n\nDATADIR = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))),\n \"data\"\n)\n\n\nDATASETS = [\n {\n \"name\": \"Precipitation\",\n \"href\": \"/catalog/dataset/1\",\n \"id\": 1,\n \"time_interval\": \"daily\",\n \"contact\": \"[email protected]\",\n },\n {\n \"name\": \"Gas\",\n \"href\": \"/catalog/dataset/2\",\n \"id\": 2,\n \"time_interval\": \"monthly\",\n \"contact\": \"[email protected]\",\n },\n {\n \"name\": \"Aerosols\",\n \"href\": \"/catalog/dataset/3\",\n \"id\": 3,\n \"time_interval\": \"daily\",\n \"contact\": \"[email protected]\",\n },\n # Meteo dataset differs, it has different\n # id for different sites\n {\n \"name\": \"Meteo\",\n \"href\": \"/catalog/dataset/4\",\n \"ids\": [10, 11, 12, 13],\n \"site_ids\": [11, 12, 14, 13],\n \"time_interval\": \"hourly\",\n \"contact\": \"[email protected]\",\n },\n]\n\n\nSiteInfo = namedtuple(\n \"SiteInfo\",\n [\n \"country\",\n \"site\",\n \"code\",\n \"classification\",\n \"latitude\",\n \"longitude\",\n \"altitude\",\n ]\n)\n\n\nclass InputError(Exception):\n pass\n\n\ndef get_all_parameters(dataset):\n parameter_file = os.path.join(\n DATADIR, \"indaaf_parameters.csv\"\n )\n df = pd.read_csv(parameter_file)\n df = df.loc[df['Theme'] == dataset]\n return list(df[\"Parameter name\"].values)\n\n\ndef get_all_site_codes():\n return list(range(1, 17))\n\n\ndef get_dataset_id(dataset, site_id):\n if dataset == \"Meteo\":\n try:\n return DATASETS[3][\"ids\"][\n DATASETS[3][\"site_ids\"].index(site_id)\n ]\n except ValueError:\n print(\n \"Meteo dataset is only available for \"\n \"sites 11, 12, 13, and 14\"\n )\n raise(InputError)\n try:\n return DATASETS[\n [row[\"name\"] for row in DATASETS].index(dataset)\n ][\"id\"]\n except ValueError:\n print(\"Unvalid datasets. Valid datasets:\")\n print(DATASETS)\n raise(InputError)\n\n\ndef get_parameter_id(parameter, dataset):\n parameter_file = os.path.join(\n DATADIR, \"indaaf_parameters.csv\"\n )\n df = pd.read_csv(parameter_file)\n row = df.loc[\n (df['Parameter name'] == parameter) &\n (df['Theme'] == dataset)\n ]\n if row.empty:\n print(\"Unvalid parameter. Valid parameters:\")\n print(df)\n raise(InputError)\n else:\n return row\n\n\ndef get_site_info(site_id):\n site_file = os.path.join(\n DATADIR, \"indaaf_sites.csv\"\n )\n df = pd.read_csv(site_file)\n row = df.loc[df['ID'] == site_id]\n if row.empty:\n print(\"Unvalid site ID. Valid sites:\")\n print(df)\n raise(InputError)\n return SiteInfo(\n country=row[\"Location\"].values[0],\n site=row[\"Site name\"].values[0],\n code=str(row[\"ID\"].values[0]),\n classification=row[\"Type\"].values[0],\n latitude=row[\"Latitude (°)\"].values[0],\n longitude=row[\"Longitude (°)\"].values[0],\n altitude=row[\"Altitude (m)\"].values[0],\n )\n"
] |
[
[
"pandas.read_csv"
]
] |
TrietChau/asd
|
[
"49d08b49246f1720cb1ac1134d5f34391a5c74f2"
] |
[
"pyxel/gl_wrapper.py"
] |
[
"import ctypes\n\nimport numpy as np\nimport OpenGL.GL as gl\nfrom OpenGL.GL import shaders\n\n\nclass GLShader:\n def __init__(self, vertex_shader, fragment_shader):\n self._program = shaders.compileProgram(\n shaders.compileShader(vertex_shader, gl.GL_VERTEX_SHADER),\n shaders.compileShader(fragment_shader, gl.GL_FRAGMENT_SHADER),\n )\n self._att = None\n self._tex_list = []\n\n def begin(self, att, tex_list):\n self._att = att\n self._tex_list = tex_list\n\n gl.glUseProgram(self._program)\n\n if att:\n att._begin(self._program)\n\n for i, tex in enumerate(tex_list):\n if tex:\n tex._begin(i)\n\n def end(self):\n for i, tex in enumerate(self._tex_list):\n if tex:\n tex._end(i)\n\n if self._att:\n self._att._end(self._program)\n\n gl.glUseProgram(0)\n\n self._att = None\n self._tex_list = []\n\n def set_uniform(self, name, param_type, *params):\n loc = gl.glGetUniformLocation(self._program, name)\n getattr(gl, \"glUniform\" + param_type)(loc, *params)\n\n\nclass GLAttribute:\n def __init__(self, att_info, count, *, dynamic=False):\n self._att_info = att_info[:]\n self._size = sum(att[2] for att in att_info)\n self._stride = self._size * 4\n self._count = count\n self._usage = dynamic and gl.GL_DYNAMIC_DRAW or gl.GL_STATIC_DRAW\n self._dtype = gl.GL_FLOAT\n self._data = np.zeros((count, self._size), np.float32)\n self._buf = gl.glGenBuffers(1)\n self._should_update_data = True\n\n @property\n def data(self):\n return self._data\n\n def update(self, count=0):\n self._count = (count == 0) and self._data.shape[0] or count\n self._should_update_data = True\n\n def _begin(self, program):\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._buf)\n\n if self._should_update_data:\n size = self._stride * self._count\n gl.glBufferData(\n gl.GL_ARRAY_BUFFER, size, self._data[:size].tobytes(), self._usage\n )\n self._should_update_data = False\n\n for att in self._att_info:\n loc = gl.glGetAttribLocation(program, att[0])\n gl.glVertexAttribPointer(\n loc,\n att[2],\n self._dtype,\n gl.GL_FALSE,\n self._stride,\n ctypes.c_void_p(att[1] * 4),\n )\n gl.glEnableVertexAttribArray(loc)\n\n def _end(self, program):\n for att in self._att_info:\n loc = gl.glGetAttribLocation(program, att[0])\n gl.glDisableVertexAttribArray(loc)\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n\nclass GLTexture:\n def __init__(self, width, height, size, *, nearest=False):\n if size == 1:\n self._format = gl.GL_LUMINANCE\n shape = (height, width)\n elif size == 3:\n self._format = gl.GL_RGB\n shape = (height, width, 3)\n elif size == 4:\n self._format = gl.GL_RGBA\n shape = (height, width, 4)\n else:\n raise ValueError(\"invalid texture format\")\n\n self._width = width\n self._height = height\n self._filter = nearest and gl.GL_NEAREST or gl.GL_LINEAR\n self._data = np.zeros(shape, np.uint8)\n self._tex = gl.glGenTextures(1)\n self._should_update_data = True\n\n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n\n @property\n def data(self):\n return self._data\n\n def update(self):\n self._should_update_data = True\n\n def copy_screen(self, x, y, left, bottom, width, height):\n gl.glBindTexture(gl.GL_TEXTURE_2D, self._tex)\n gl.glCopyTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, left, bottom, width, height)\n gl.glBindTexture(gl.GL_TEXTURE_2D, 0)\n\n def _begin(self, i):\n gl.glActiveTexture(gl.GL_TEXTURE0 + i)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self._tex)\n\n if self._should_update_data:\n gl.glTexImage2D(\n gl.GL_TEXTURE_2D,\n 0,\n self._format,\n self._width,\n self._height,\n 0,\n 
self._format,\n gl.GL_UNSIGNED_BYTE,\n self._data.tobytes(),\n )\n self._should_update_data = False\n\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, self._filter)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, self._filter)\n gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)\n\n @staticmethod\n def _end(i):\n gl.glActiveTexture(gl.GL_TEXTURE0 + i)\n gl.glBindTexture(gl.GL_TEXTURE_2D, 0)\n"
] |
[
[
"numpy.zeros"
]
] |
mortazavilab/PyWGCNA
|
[
"f61bf9a71c39d1b58c787b36ffd271d68de663b4"
] |
[
"PyWGCNA/comparison.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom scipy.stats import fisher_exact\nimport matplotlib.pyplot as plt\nimport pickle\n\n\n# bcolors\nHEADER = '\\033[95m'\nOKBLUE = '\\033[94m'\nOKCYAN = '\\033[96m'\nOKGREEN = '\\033[92m'\nWARNING = '\\033[93m'\nFAIL = '\\033[91m'\nENDC = '\\033[0m'\nBOLD = '\\033[1m'\nUNDERLINE = '\\033[4m'\n\n\nclass Comparison:\n \"\"\"\n A class used to compare PyWGCNA to another PyWGCNA or any gene marker table\n\n :param name1: name of first WGCNA\n :type name1: str\n :param name2: name of second WGCNA\n :type name2: str\n :param geneModule1: gene modules of first WGCNA\n :type geneModule1: dict\n :param geneModule2: gene modules of second WGCNA\n :type geneModule2: dict\n :param geneMarker: gene marker of single cell data\n :type geneMarker: pandas dataframe\n :param sc: indicate if object is WGCNA or single cell\n :type sc: bool\n :param comparison: Summary of comparison results\n :type comparison: pandas dataframe\n\n \"\"\"\n\n def __init__(self, name1=\"name1\", name2=\"name2\", geneModule1=None, geneModule2=None, geneMarker=None, sc=False):\n self.name1 = name1\n self.name2 = name2\n self.geneModule1 = geneModule1\n self.geneModule2 = geneModule2\n self.geneMarker = geneMarker\n self.sc = sc\n\n self.comparison = None\n\n def compareWGCNA(self):\n \"\"\"\n Compare two list of modules from two bulk gene expression data set\n\n :return: update compare class that replace automatically\n :rtype: compare class\n \"\"\"\n if self.name1 == self.name2:\n name1 = self.name1 + \"1\"\n name2 = self.name2 + \"2\"\n else:\n name1 = self.name1\n name2 = self.name2\n moduleColors1 = self.geneModule1.moduleColors.unique().tolist()\n moduleColors2 = self.geneModule2.moduleColors.unique().tolist()\n num = len(moduleColors1) * len(moduleColors2)\n df = pd.DataFrame(columns=[name1, name2, name1 + \"_size\", name2 + \"_size\", \"number\", \"fraction(%)\", \"P_value\"],\n index=range(num))\n\n genes = []\n count = 0\n for moduleColor1 in moduleColors1:\n node1 = self.geneModule1.loc[self.geneModule1.moduleColors == moduleColor1, 'gene_id'].tolist()\n genes = genes + node1\n for moduleColor2 in moduleColors2:\n node2 = self.geneModule2.loc[self.geneModule2.moduleColors == moduleColor2, 'gene_id'].tolist()\n\n df[name1][count] = moduleColor1\n df[name2][count] = moduleColor2\n df[name1 + '_size'][count] = len(node1)\n df[name2 + '_size'][count] = len(node2)\n num = np.intersect1d(node1, node2)\n df['number'][count] = len(num)\n df['fraction(%)'][count] = len(num) / len(node2) * 100\n count = count + 1\n\n genes = genes + node2\n\n genes = list(set(genes))\n nGenes = len(genes)\n\n count = 0\n for moduleColor1 in moduleColors1:\n for moduleColor2 in moduleColors2:\n table = np.array(\n [[nGenes - df[name1 + '_size'][count] - df[name2 + '_size'][count] + df['number'][count],\n df[name1 + '_size'][count] - df['number'][count]],\n [df[name2 + '_size'][count] - df['number'][count],\n df['number'][count]]])\n oddsr, p = fisher_exact(table, alternative='two-sided')\n df['P_value'][count] = p\n count = count + 1\n\n self.comparison = df\n\n def compareSingleCell(self):\n \"\"\"\n Compare PyWGCNA object to single cell gene expression data\n\n :return: update compare class that replace automatically\n :rtype: compare class\n \"\"\"\n list_sn = np.unique(self.geneMarker['cluster'])\n num = len(self.geneModule1.keys()) * len(list_sn)\n df = pd.DataFrame(\n columns=[\"WGCNA\", \"sc\", \"WGCNA_size\", \"sc_size\", \"number\", \"fraction(%)\", \"P_value\", \"cellType\"],\n 
index=range(num))\n\n genes = []\n count = 0\n for i in range(len(self.geneModule1.keys())):\n node1 = self.geneModule1[self.geneModule1.keys()[i]]\n genes = genes + node1\n for j in range(len(list_sn)):\n node2 = self.geneMarker[self.geneMarker['cluster'] == list_sn[j], :]\n\n df['WGCNA'][count] = self.geneModule1.keys()[i]\n df['sc'][count] = \"N\" + str(list_sn[j])\n df['WGCNA_size'][count] = len(node1)\n df['sc_size'][count] = len(node2)\n num = np.intersect1d(node1, node2)\n df['number'][count] = len(num)\n df['fraction(%)'][count] = len(num) / len(node2) * 100\n df['cellType'][count] = self.geneMarker['cellType'][\n np.where(self.geneMarker['cluster'] == list_sn[j]).tolist()[0]]\n count = count + 1\n\n genes = genes + node2\n\n genes = list(set(genes))\n nGenes = len(genes)\n\n count = 0\n for i in range(len(self.geneModule1.keys())):\n for j in range(len(list_sn)):\n table = np.array([[nGenes - df['WGCNA'][count] - df['sc'][count] + df['number'][count],\n df['WGCNA'][count] - df['number'][count]],\n [df['sc'][count] - df['number'][count],\n df['number'][count]]])\n oddsr, p = fisher_exact(table, alternative='two-sided')\n df['P_value'][count] = p\n count = count + 1\n\n self.comparison = df\n\n def plotCompareWGCA(self, order1=None, order2=None, save=False):\n \"\"\"\n plot comparison\n\n :param order1: order of modules in PyWGCNA1 you want to show in plot (name of each elements should mapped the name of modules in your first PyWGCNA)\n :type order1: list of str\n :param order2: order of modules in PyWGCNA2 you want to show in plot (name of each elements should mapped the name of modules in your second PyWGCNA)\n :type order2: list of str\n :param save: if you want to save plot as comparison.png near to your script\n :type save: bool\n\n \"\"\"\n result = self.comparison.copy(deep=True)\n result['-log10(P_value)'] = -1 * np.log10(result['P_value'].astype(np.float64))\n\n if self.name1 == self.name2:\n name1 = self.name1 + \"1\"\n name2 = self.name2 + \"2\"\n else:\n name1 = self.name1\n name2 = self.name2\n\n result.drop(labels=np.where(result[name1] == 'grey')[0].tolist(),\n axis=0,\n inplace=True)\n result.reset_index(drop=True, inplace=True)\n result.drop(labels=np.where(result[name2] == 'grey')[0].tolist(),\n axis=0,\n inplace=True)\n result.reset_index(drop=True, inplace=True)\n\n result.loc[np.where(result['fraction(%)'] == 0)[0].tolist(), 'fraction(%)'] = np.nan\n result.loc[np.where(result['fraction(%)'] == 0)[0].tolist(), 'fraction(%)'] = np.nan\n\n if np.max(result['-log10(P_value)'][np.isfinite(result['-log10(P_value)'])]) is np.nan:\n result.loc[np.isinf(result['-log10(P_value)']), '-log10(P_value)'] = 100\n else:\n result.loc[np.isinf(result['-log10(P_value)']), '-log10(P_value)'] = np.max(\n result['-log10(P_value)'][np.isfinite(result['-log10(P_value)'])]) + 1\n\n grey = result.copy(deep=True)\n result.loc[np.where(result['P_value'] > 0.01)[0].tolist(), '-log10(P_value)'] = np.nan\n\n result.dropna(axis=0, inplace=True)\n result.reset_index(drop=True, inplace=True)\n\n grey.loc[np.where(grey['P_value'] <= 0.01)[0].tolist(), '-log10(P_value)'] = np.nan\n grey.dropna(axis=0, inplace=True)\n grey.reset_index(drop=True, inplace=True)\n\n if order1 is not None:\n result[name1] = pd.Categorical(result[name1], order1)\n result.sort_values(by=[name1], inplace=True)\n\n grey[name1] = pd.Categorical(grey[name1], order1)\n grey.sort_values(by=[name1], inplace=True)\n\n if order2 is not None:\n result[name2] = pd.Categorical(result[name2], order2)\n result.sort_values(by=[name2], 
inplace=True)\n\n grey[name2] = pd.Categorical(grey[name2], order2)\n grey.sort_values(by=[name2], inplace=True)\n\n fig, ax = plt.subplots(figsize=(max(5, len(np.unique(result[name1])) / 3)+3,\n max(5, len(np.unique(result[name2])) / 3)),\n facecolor='white')\n scatter = ax.scatter(x=result[name1],\n y=result[name2],\n s=result['fraction(%)'].astype(float)*4,\n c=result['-log10(P_value)'],\n alpha=0.8,\n cmap='viridis',\n vmin=np.min(result['fraction(%)']),\n vmax=np.max(result['fraction(%)']))\n # Add a colorbar\n fig.colorbar(scatter, shrink=0.25, label='-log10(P_value)')\n\n geyplot = ax.scatter(x=grey[name1],\n y=grey[name2],\n s=grey['fraction(%)'].astype(float)*4,\n c='grey',\n alpha=0.8,\n vmin=np.min(grey['fraction(%)']),\n vmax=np.max(grey['fraction(%)']))\n\n # produce a legend with the unique colors from the scatter\n kw = dict(prop=\"sizes\", num=4, color='black', fmt=\"{x:.1f} %\",\n func=lambda s: s/4)\n legend1 = ax.legend(*scatter.legend_elements(**kw),\n bbox_to_anchor=(1.05, 0.98),\n loc=\"upper left\",\n title=\"Fraction(%)\",\n frameon=False)\n ax.add_artist(legend1)\n\n if grey.shape[0] != 0:\n kw = dict(prop=\"sizes\",\n num=1,\n color='grey',\n fmt=\"< 2\")\n legend2 = ax.legend(*geyplot.legend_elements(**kw),\n bbox_to_anchor=(1.05, 0.75),\n loc=\"upper left\",\n title=\"-log10(P_value)\",\n frameon=False)\n ax.add_artist(legend2)\n\n plt.xticks(rotation=90)\n plt.xlabel(name1 + \" modules\")\n plt.ylabel(name2 + \" modules\")\n\n if save:\n plt.savefig('comparison_' + name1 + '_' + name2 + '.png')#'.pdf', format='pdf')\n plt.show()\n\n def saveComparison(self):\n \"\"\"\n save comparison object as comparison.p near to the script\n \n \"\"\"\n print(f\"{BOLD}{OKBLUE}Saving comparison as comparison.p{ENDC}\")\n\n picklefile = open('comparison.p', 'wb')\n pickle.dump(self, picklefile)\n picklefile.close()\n\n"
] |
[
[
"numpy.array",
"numpy.isfinite",
"numpy.unique",
"numpy.min",
"pandas.Categorical",
"matplotlib.pyplot.savefig",
"numpy.intersect1d",
"numpy.max",
"scipy.stats.fisher_exact",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.isinf",
"matplotlib.pyplot.ylabel"
]
] |
mnagaku/ParaMol
|
[
"13529f584e2d50076e038388ecbdd57af23c73b9"
] |
[
"ParaMol/MM_engines/openmm.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nDescription\n-----------\nThis module defines the :obj:`ParaMol.MM_engines.openmm.OpenMMEngine` class which is the ParaMol wrapper for OpenMM.\n\"\"\"\n\nimport logging\nimport numpy as np\nimport simtk.unit as unit\nimport simtk.openmm as openmm\nimport simtk.openmm.app as app\n\n\n# ------------------------------------------------------------ #\n# #\n# OpenMMEngine #\n# #\n# ------------------------------------------------------------ #\nclass OpenMMEngine:\n \"\"\"\n ParaMol's OpenMM wrapper.\n\n Parameters\n ----------\n init_openmm : bool, optional, default=False\n Whether or not to create the OpenMM system, topology, integrator and context and platform upon creation of an OpenMMEngine instance.\n Note that the only objects created are the ones not passed as parameters.\n topology_format : str, optional, default=None\n Available options are \"AMBER\", \"GROMACS\", \"CHARMM\", or \"XML\".\n top_file : str, optional, default=None\n Path to the AMBER, GROMACS or CHARMM topology file.\n crd_format : str\n Available options are \"AMBER\", \"GROMACS\", \"CHARMM\", or \"PDB\".\n crd_file : str, optional, default=None\n Path to the AMBER, GROMACS or CHARMM coordinates file.\n charmm_param_file : str\n Path to the CHARMM param file.\n xml_file : str, optional, default=None\n Path to the .xml OpenMM system file.\n platform_name : str, optional, default='Reference'\n Name of the OpenMM platform.\n integrator_params : dict, optional, default={'temperature' : 300.0 * unit.kelvin, 'stepSize' : 0.001 * unit.picoseconds, 'frictionCoeff' : 2.0 / unit.picoseconds}\n Keyword arguments passed to the simtk.openmm.openmm.LangevinIntegrator. Ignored if an OpenMM Integrator is provided through the integrator parameter.\n create_system_params : dict, optional, default={'temperature' : 300.0 * unit.kelvin, 'stepSize' : 0.001 * unit.picoseconds, 'frictionCoeff' : 2.0 / unit.picoseconds}\n Keyword arguments passed to simtk.openmm.app.amberprmtopfile.createSystem. Ignored if an OpenMM System is provided through the system parameter.\n system : simtk.openmm.openmm.System, optional, default=None\n OpenMM system.\n integrator : any OpenMM integrator, optional, default=None\n OpenMM integrator.\n platform : simtk.openmm.openmm.Platform, optional, default=None\n OpenMM platform\n context : simtk.openmm.openmm.Context, optional, default=None\n OpenMM context.\n topology : simtk.openmm.app.topology.Topology, optional, default=None\n OpenMM topology.\n\n Attributes\n ----------\n topology_format : str\n Available options are \"AMBER\", \"GROMACS\", \"CHARMM\", or \"XML\".\n top_file : str\n Path to the AMBER, GROMACS or CHARMM topology file.\n crd_format : str\n Available options are \"AMBER\", \"GROMACS\", \"CHARMM\", or \"PDB\".\n charmm_param_file : str\n Path to the CHARMM param file.\n xml_file : str\n Path to the .xml OpenMM system file.\n crd_file : str\n Path to the AMBER, GROMACS or CHARMM coordinates file.\n platform_name : str, optional, default='Reference'\n Name of the OpenMM platform.\n system : simtk.openmm.openmm.System\n OpenMM system.\n integrator : any OpenMM integrator\n OpenMM integrator.\n platform : simtk.openmm.openmm.Platform\n OpenMM platform\n context : simtk.openmm.openmm.Context\n OpenMM context.\n topology : simtk.openmm.app.topology.Topology\n OpenMM topology.\n force_groups : list of int\n List containing all the force groups present in the system.\n atom_list : list of str\n List containing the atom symbols of the system. 
Method get_atom_list has to be run to set this attribute variable.\n atomic_number_list : list of int\n List containing the atomic numbers of the system. Method get_atomic_numbers has to be run to set this attribute variable.\n masses_list : list of float\n List containing the masses of the atoms of the system. Method get_masses has to be run to set this attribute variable.\n n_atoms : int\n Number of atoms of the system.\n cell : np.ndarray, shape=(3, 3), dtype=float\n Array containing the box size cell vectors (in angstroms). Method get_cell has to be run to set this attribute variable.\n scnb : float\n 1-4 scaling factor for LJ interactions(0.833333).\n scee : float\n 1-4 scaling factor for electrostatic interaction\n \"\"\"\n force_groups_dict = {'HarmonicBondForce': 0,\n 'HarmonicAngleForce': 1,\n 'PeriodicTorsionForce': 2,\n 'NonbondedForce': 11,\n 'CMMotionRemover': 3,\n 'CustomBondForce': 5,\n 'CustomAngleForce': 6,\n 'CustomTorsionForce': 7,\n \"CMAPTorsionForce\": 12}\n\n def __init__(self, init_openmm=False, topology_format=None, top_file=None, crd_format=None, crd_file=None, charmm_param_file=None, xml_file=None,\n platform_name='Reference', system=None, integrator=None, platform=None, context=None, topology=None,\n integrator_params={'temperature': 300.0 * unit.kelvin, 'stepSize': 0.001 * unit.picoseconds, 'frictionCoeff': 2.0 / unit.picoseconds},\n create_system_params={'nonbondedMethod': app.NoCutoff, 'nonbondedCutoff': 1.2 * unit.nanometer, 'constraints': None, 'rigidWater': True}):\n\n self.topology_format = topology_format\n self.crd_format = crd_format\n self.xml_file = xml_file\n self.top_file = top_file\n self.crd_file = crd_file\n self.charmm_param_file = charmm_param_file\n\n # OpenMM essential object instances\n self.system = system\n self.integrator = integrator\n self.platform = platform\n self.context = context\n self.topology = topology\n\n # Platform-specific variables\n self.platform_name = platform_name if platform_name is not None else 'Reference'\n\n # Molecule-specific variables\n self.force_groups = []\n self.forces_indexes = {}\n self.atom_list = None\n self.atomic_number_list = None\n self.masses_list = None\n self.n_atoms = None\n self.cell = None\n\n # 1-4 Scaling parameters\n self.scnb = 0.833333 # scee\n self.scee = 0.5 # scnb\n\n # Params to be passed to OpenMM\n self._create_system_params = create_system_params\n self._integrator_params = integrator_params\n\n if init_openmm:\n self.init_openmm(self._integrator_params, self._create_system_params)\n\n if self.system is not None:\n self._set_force_groups()\n\n # ------------------------------------------------------------ #\n # #\n # PUBLIC METHODS #\n # #\n # ------------------------------------------------------------ #\n def init_openmm(self, integrator_params=None, create_system_params=None):\n \"\"\"\n Method that initiates OpenMM by creating\n\n Parameters\n ----------\n integrator_params : dict\n Keyword arguments passed to the simtk.openmm.openmm.LangevinIntegrator\n create_system_params : dict\n Keyword arguments passed to simtk.openmm.app.amberprmtopfile.createSystem\n\n Returns\n -------\n system : simtk.openmm.openmm.System\n OpenMM system created.\n \"\"\"\n from simtk.openmm import XmlSerializer\n\n assert self.topology_format is not None, \"No topology format was provided.\"\n assert self.crd_format is not None, \"No coordinate format was provided.\"\n\n if self.topology_format.upper() in [\"AMBER\", \"GROMACS\", \"CHARMM\"]:\n assert self.top_file is not None, \"Topology format is {} 
but no topology file was provided.\".format(self.topology_format)\n else:\n raise NotImplementedError(\"Topology format {} is not known.\".format(self.topology_format))\n\n assert self.crd_file is not None, \"create_system flag is True but no crd_file was provided.\"\n if self.platform_name is None:\n logging.info(\"No platform set. Will use reference.\")\n self.platform_name = \"Reference\"\n else:\n assert self.platform_name in [\"Reference\", \"CPU\", \"OpenCL\", \"CUDA\"], \"\"\"create_system flag is True but no\n correct platform was provided.\"\"\"\n\n # Read topology\n if self.topology_format.upper() == \"AMBER\":\n if self.topology is None:\n top = app.AmberPrmtopFile(self.top_file)\n self.topology = top.topology\n elif self.topology_format.upper() == \"GROMACS\":\n if self.topology is None:\n top = app.GromacsTopFile(self.top_file)\n self.topology = top.topology\n elif self.topology_format.upper() == \"CHARMM\":\n if self.topology is None:\n top = app.CharmmPsfFile(self.top_file)\n self.topology = top.topology\n charmm_params = app.CharmmParameterSet('charmm.rtf', self.charmm_param_file)\n else:\n raise NotImplementedError(\"Topology format {} is not currently supported.\".format(self.topology_format))\n\n # Read coordinate file\n if self.crd_format.upper() == \"AMBER\":\n crd = app.AmberInpcrdFile(self.crd_file)\n elif self.crd_format.upper() == \"GROMACS\":\n crd = app.GromacsGroFile(self.crd_file)\n elif self.crd_format.upper() == \"CHARMM\":\n crd = app.CharmmCrdFile(self.crd_file)\n elif self.crd_format.upper() == \"PDB\":\n crd = app.PDBFile(self.crd_file)\n else:\n raise NotImplementedError(\"Coordinate format {} is not currently supported.\".format(self.crd_format))\n\n if self.system is None:\n if self.xml_file is None:\n assert create_system_params is not None, \"No settings to create the system were provided.\"\n\n logging.info(\"Creating OpenMM System from {} file.\".format(self.topology_format))\n if self.topology_format.upper() == \"CHARMM\":\n self.system = top.createSystem(charmm_params, **create_system_params)\n else:\n self.system = top.createSystem(**create_system_params)\n else:\n logging.info(\"Creating OpenMM System from XML file.\")\n xml_file = open(self.xml_file)\n self.system = XmlSerializer.deserializeSystem(xml_file.read())\n xml_file.close()\n\n if self.integrator is None:\n assert integrator_params is not None, \"No settings to create the integrator were provided.\"\n\n self.integrator = openmm.LangevinIntegrator(integrator_params['temperature'], integrator_params[\"frictionCoeff\"], integrator_params[\"stepSize\"])\n logging.info(\"Creating OpenMM integrator.\")\n if self.platform is None:\n self.platform = openmm.Platform.getPlatformByName(self.platform_name)\n logging.info(\"Creating OpenMM platform.\")\n if self.context is None:\n self.context = openmm.Context(self.system, self.integrator, self.platform)\n logging.info(\"Creating OpenMM Context.\")\n\n # Set positions in context\n self.context.setPositions(crd.positions)\n\n return self.system\n\n def get_atom_list(self):\n \"\"\"\n Method that gets a list of the atom symbols.\n\n Returns\n -------\n atom_list : list of str\n List of the atom symbols of the system.\n \"\"\"\n assert self.topology is not None, \"OpenMM topology is not set.\"\n\n self.atom_list = []\n for atom in self.topology.atoms():\n self.atom_list.append(atom.element.symbol)\n\n return self.atom_list\n\n def get_atomic_numbers(self):\n \"\"\"\n Method that gets a list of the atomic numbers of the system.\n\n Returns\n -------\n 
atom_list : list of str\n List of the atomic numbers of the system.\n \"\"\"\n assert self.topology is not None, \"OpenMM topology is not set.\"\n\n self.atomic_number_list = []\n for atom in self.topology.atoms():\n self.atomic_number_list.append(atom.element.atomic_number)\n\n return self.atomic_number_list\n\n def get_number_of_atoms(self):\n \"\"\"\n Method that gets the number of atoms of the system.\n\n Returns\n -------\n n_atoms : n_int\n Number of atoms of the system.\n \"\"\"\n assert self.system is not None, \"OpenMM system is not set.\"\n\n self.n_atoms = self.system.getNumParticles()\n\n return self.n_atoms\n\n def get_masses(self):\n \"\"\"\n Method that gets the masses of atoms of the system (in amu).\n\n Returns\n -------\n masses : list of floats\n Masses of the atoms of the system.\n \"\"\"\n self.masses_list = []\n for atom_index in range(self.get_number_of_atoms()):\n self.masses_list.append(self.system.getParticleMass(atom_index))\n\n return self.masses_list\n\n def get_cell(self):\n \"\"\"\n Method that gets the cell vectors.\n\n Returns\n -------\n cell : np.array\n (3,3) array containing the cell vectors in angstrom but no simtk.units.\n \"\"\"\n assert self.system is not None, \"OpenMM system is not set.\"\n\n self.cell = np.zeros((3,3))\n openmm_cell = self.system.getDefaultPeriodicBoxVectors()\n\n for i in range(3):\n self.cell[i, i] = openmm_cell[i][i]._value\n\n self.cell = self.cell * 10.0 # nanometers to angstrom\n\n return self.cell\n\n def write_system_xml(self, file_name):\n \"\"\"\n Method that writes the OpenMM system stored in the `system` attribute to an XML file.\n\n Parameters\n ----------\n file_name : str\n Name of the XML file to be written.\n\n Returns\n -------\n `True` if file was closed successfully. `False` otherwise.\n \"\"\"\n\n from simtk.openmm import XmlSerializer\n\n logging.info(\"Writing serialized system to XML file {}.\".format(file_name))\n\n serialized_system = XmlSerializer.serializeSystem(self.system)\n outfile = open(file_name, 'w')\n outfile.write(serialized_system)\n outfile.close()\n\n return outfile.close()\n\n def minimize_system(self, tolerance=1, max_iter=0):\n \"\"\"\n Method that minimizes the system's energy starting from the state stored at the context attribute.\n\n Notes\n -----\n More information can be found at: https://simtk.org/api_docs/openmm/api3_1/classOpenMM_1_1LocalEnergyMinimizer.html\n\n Parameters\n ----------\n tolerance : float\n Specifies how precisely the energy minimum must be located. Minimization will be halted once the root-mean-square value of all force components reaches this tolerance.\n max_iter : int\n Maximum number of iterations to perform. If this is 0, minimation is continued until the results converge without regard to how many iterations it takes. 
The default value is 0.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n\n # Minimizing System\n openmm.LocalEnergyMinimizer.minimize(self.context, tolerance=tolerance, maxIterations=max_iter)\n\n return self.context\n\n # -----------------------------------------------------------------------------------------------------------------\n # --------------------------------------------- Custom Forces -----------------------------------------------------\n # -----------------------------------------------------------------------------------------------------------------\n def add_torsion_terms(self, periodicities=[1,2,3,4], phase_default=0.0, v_default=0.0):\n \"\"\"\n Method that adds the torsional terms with `periodicities` to the OpenMM system 'PeriodicTorsionForce' force group.\n\n Parameters\n ----------\n periodicities : list of int\n Torsional terms periodicities to be added. If these already exist nothing happens.\n phase_default :\n Value of the phase angle upon creation of the torsional term in radians.\n v_default :\n Value of the torsion barrier height upon creation of the torsional term in kJ/mol.\n\n Notes\n -----\n This should be used before creating the ParaMol representation of the Force Field.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n n_added = 0\n\n if self.force_groups_dict['PeriodicTorsionForce'] in self.force_groups:\n dihedral_force = self.system.getForce(self.force_groups_dict['PeriodicTorsionForce'])\n prev_dihedral = None\n for i in range(dihedral_force.getNumTorsions()):\n p1, p2, p3, p4, per, phase, k = dihedral_force.getTorsionParameters(i)\n\n curr_dihedral = [p1, p2, p3, p4]\n if curr_dihedral == prev_dihedral:\n prev_dihedral = curr_dihedral\n continue\n\n prev_dihedral = curr_dihedral\n for n in periodicities:\n if n != per:\n dihedral_force.addTorsion(p1, p2, p3, p4, n, phase_default, v_default)\n n_added += 1\n\n logging.info(\"Added {} extra-torsions.\".format(n_added))\n # Since the number of torsions has changes it is necessary to reinitialize the context\n # It is also convenient to set the positions so that MD simulations can be started without problems.\n positions_tmp = self.context.getState(getPositions=True).getPositions()\n self.context.reinitialize()\n self.context.setPositions(positions_tmp)\n\n return self.context\n\n # -----------------------------------------------------------------------------------------------------------------\n # -------------------------------------------- Energies and Forces ------------------------------------------------\n # -----------------------------------------------------------------------------------------------------------------\n def get_potential_energy(self, positions):\n \"\"\"\n Method that, given an array of positions (in nanometers), sets the context atomic positions and computes the potential energy.\n\n Parameters\n ----------\n positions: list or np.array\n Positions array\n\n Returns\n -------\n epot : float\n Potential energy value in kJ/mol.\n \"\"\"\n self.context.setPositions(positions)\n\n epot = self.context.getState(getEnergy=True).getPotentialEnergy()._value\n\n return epot\n \n def get_kinetic_energy(self, velocities=None):\n \"\"\"\n Method that computes the kinetic energy.\n\n Returns\n -------\n ekin : float\n Kinetic energy value in kJ/mol.\n \"\"\"\n if velocities is not None:\n self.set_velocities(velocities)\n\n ekin = 
self.context.getState(getEnergy=True).getKineticEnergy()._value\n\n return ekin\n\n def get_forces(self, positions):\n \"\"\"\n Method that, given an array of positions (in nanometers), sets the context atomic positions and computes the forces.\n\n Parameters\n ----------\n positions: list or np.array\n Positions array\n\n Returns\n -------\n forces : np.array\n (Natoms,3) arrary containing forces in kJ/mol/nm.\n \"\"\"\n self.context.setPositions(positions)\n\n forces = self.context.getState(getForces=True).getForces(asNumpy=True)._value\n\n return forces\n\n # -----------------------------------------------------------------------------------------------------------------\n # ------------------------------------------- Bonded Terms methods ------------------------------------------------\n # -----------------------------------------------------------------------------------------------------------------\n def set_harmonic_bond_force_parameters(self, ff_bond_terms):\n \"\"\"\n Method that updates in the OpenMM system the parameters of the terms belonging to the force group 'HarmonicBondForce'.\n\n Parameters\n ----------\n ff_bond_terms : list of :obj:`ParaMol.Force_field.force_field_term.FFTerm`\n List containing instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` which belong to the force group 'HarmonicBondForce'.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n for k, bond_force_index in enumerate(self.forces_indexes[\"HarmonicBondForce\"]):\n bond_force = self.system.getForce(bond_force_index)\n\n for bond_term in ff_bond_terms[k]:\n bond_force.setBondParameters(bond_term.idx, *bond_term.atoms, bond_term.parameters[\"bond_eq\"].value,\n bond_term.parameters[\"bond_k\"].value)\n\n bond_force.updateParametersInContext(self.context)\n\n return self.context\n\n def set_harmonic_angle_force_parameters(self, ff_angle_terms):\n \"\"\"\n Method that updates in the OpenMM system the parameters of the terms belonging to the force group 'HarmonicAngleForce'.\n\n Parameters\n ----------\n ff_angle_terms : list of :obj:`ParaMol.Force_field.force_field_term.FFTerm`\n List containing instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` which belong to the force group 'HarmonicAngleForce'.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n for k, angle_force_index in enumerate(self.forces_indexes[\"HarmonicAngleForce\"]):\n angle_force = self.system.getForce(angle_force_index)\n\n for angle_term in ff_angle_terms[k]:\n angle_force.setAngleParameters(angle_term.idx, *angle_term.atoms, angle_term.parameters[\"angle_eq\"].value,\n angle_term.parameters[\"angle_k\"].value)\n\n angle_force.updateParametersInContext(self.context)\n\n return self.context\n\n def set_periodic_torsion_force_parameters(self, ff_torsion_terms):\n \"\"\"\n Method that updates in the OpenMM system the parameters of the terms belonging to the force group 'PeriodicTorsionForce'.\n\n Parameters\n ----------\n ff_torsion_terms : list of :obj:`ParaMol.Force_field.force_field_term.FFTerm`\n List containing instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` which belong to the force group 'PeriodicTorsionForce'.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n\n for k, torsion_force_index in enumerate(self.forces_indexes[\"PeriodicTorsionForce\"]):\n torsion_force = self.system.getForce(torsion_force_index)\n\n for 
torsion_term in ff_torsion_terms[k]:\n div_value = np.sign(torsion_term.parameters[\"torsion_phase\"].value) * 2.0 * np.pi\n\n if div_value == 0.0:\n div_value = 2.0 * np.pi\n\n torsion_force.setTorsionParameters(torsion_term.idx, *torsion_term.atoms,\n torsion_term.parameters[\"torsion_periodicity\"].value,\n torsion_term.parameters[\"torsion_phase\"].value % div_value,\n torsion_term.parameters[\"torsion_k\"].value)\n\n torsion_force.updateParametersInContext(self.context)\n\n return self.context\n\n def set_bonded_parameters(self, force_field_optimizable):\n \"\"\"\n Method that wraps the methods set_harmonic_bond_force_parameters, set_harmonic_angle_force_parameters, and set_periodic_torsion_force_parameters in order to ease the procedure of updating the system's bonded parameters.\n\n Parameters\n ----------\n force_field_optimizable : dict\n Dictionary that contains as keys force groups names as values and the correspondent :obj:`ParaMol.Force_field.force_field_term.FFTerm`.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n\n # Compute all bond term contributions\n if \"HarmonicBondForce\" in force_field_optimizable:\n self.set_harmonic_bond_force_parameters(force_field_optimizable[\"HarmonicBondForce\"])\n if \"HarmonicAngleForce\" in force_field_optimizable:\n self.set_harmonic_angle_force_parameters(force_field_optimizable[\"HarmonicAngleForce\"])\n if \"PeriodicTorsionForce\" in force_field_optimizable:\n self.set_periodic_torsion_force_parameters(force_field_optimizable[\"PeriodicTorsionForce\"])\n\n return self.context\n\n # -----------------------------------------------------------------------------------------------------------------\n # ----------------------------------------- Non-Bonded Terms methods ----------------------------------------------\n # -----------------------------------------------------------------------------------------------------------------\n def set_non_bonded_parameters_to_zero(self):\n \"\"\"\n Method that sets all non bonded parameters to zero, namely sigma, epsilon and charge to zero. This is done for all the particles and exceptions.\n\n Notes\n -----\n When a Context is created, it decides which interactions need to be calculated as exceptions and which ones are \"just exclusions\". Hence, any exception to be included has to be given a nonzero chargeprod initially. 
Once the Context is created, the number of exceptions can't be changed.\n More information: https://github.com/pandegroup/openmm/issues/252\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n\n for k, nonbonded_force_index in enumerate(self.forces_indexes[\"NonbondedForce\"]):\n nonbonded_force = self.system.getForce(nonbonded_force_index)\n\n for i in range(nonbonded_force.getNumParticles()):\n #q, sigma, eps = nonbonded_force.getParticleParameters(i)\n\n # Set particle parameters to zero\n nonbonded_force.setParticleParameters(i, 0.0, 0.0, 0.0)\n\n if self.context.getPlatform().getName() in [\"CPU\", \"Reference\"]:\n # CPU platform raises the following exception if sigma, epsilon and charge are set to 0\n # Exception: updateParametersInContext: The number of non-excluded exceptions has changed\n # Solution is to set them to very small numbers.\n for i in range(nonbonded_force.getNumExceptions()):\n at1, at2, q, sigma, eps = nonbonded_force.getExceptionParameters(i)\n\n if abs(q._value) > 1e-16 and eps._value > 1e-16:\n nonbonded_force.setExceptionParameters(i, at1, at2, 1e-16, 1e-16, 1e-16)\n else:\n for i in range(nonbonded_force.getNumExceptions()):\n at1, at2, q, sigma, eps = nonbonded_force.getExceptionParameters(i)\n nonbonded_force.setExceptionParameters(i, at1, at2, 0.0, 0.0, 0.0)\n\n # Update parameters in context\n nonbonded_force.updateParametersInContext(self.context)\n\n return self.context\n\n def set_nonbonded_parameters(self, force_field_optimizable):\n \"\"\"\n Method that updates the non-bonded parameters of the OpenMM system.\n\n Parameters\n ----------\n force_field_optimizable : dict\n Dictionary that contains as keys force groups names as values and the correspondent :obj:`ParaMol.Force_field.force_field_term.FFTerm`.\n\n Returns\n -------\n context : simtk.openmm.openmm.Context\n Updated OpenMM Context.\n \"\"\"\n\n if \"NonbondedForce\" in force_field_optimizable and \"Scaling14\" not in force_field_optimizable:\n # If updating the non bonded force but not updating the 1-4 scaling factors\n\n nonbonded_force_terms = force_field_optimizable[\"NonbondedForce\"]\n\n for k, nonbonded_force_index in enumerate(self.forces_indexes[\"NonbondedForce\"]):\n nonbonded_force = self.system.getForce(nonbonded_force_index)\n\n for nonbonded_term in nonbonded_force_terms[k]:\n nonbonded_force.setParticleParameters(nonbonded_term.idx, nonbonded_term.parameters[\"charge\"].value,\n nonbonded_term.parameters[\"lj_sigma\"].value, nonbonded_term.parameters[\"lj_eps\"].value)\n\n nonbonded_force.updateParametersInContext(self.context)\n\n # ---------------------------------------------------------------- #\n # Non-Bonded Exceptions #\n # ---------------------------------------------------------------- #\n for i in range(nonbonded_force.getNumExceptions()):\n at1, at2, charge_prod, sigma, eps, = nonbonded_force.getExceptionParameters(i)\n\n if abs(charge_prod._value) < 1e-8:\n continue\n\n # Lorentz-Berthelot rules:\n\n # \\epsilon_{ij} = \\sqrt{\\epsilon_{ii} * \\epsilon_{jj}}\n epsilon = self.scee * np.sqrt(nonbonded_force_terms[k][at1].parameters[\"lj_eps\"].value*nonbonded_force_terms[k][at2].parameters[\"lj_eps\"].value)\n\n #epsilon = scee * np.sqrt(np.abs(nonbonded_force_terms[at1].parameters[\"lj_eps\"].value) *\n # np.abs(nonbonded_force_terms[at2].parameters[\"lj_eps\"].value)) * \\\n # np.sign(nonbonded_force_terms[at1].parameters[\"lj_eps\"].value) * \\\n # np.sign(nonbonded_force_terms[at2].parameters[\"lj_eps\"].value)\n\n # 
\\sigma_{ij} = (\\sigma_{ii} + \\sigma_{jj}) / 2\n # Not necessary to scale this value because epsilon controls the LJ 12-6 interaction scaling.\n sigma = 0.5 * (nonbonded_force_terms[k][at1].parameters[\"lj_sigma\"].value +\n nonbonded_force_terms[k][at2].parameters[\"lj_sigma\"].value)\n\n charge_prod = self.scnb * \\\n nonbonded_force_terms[k][at1].parameters[\"charge\"].value * \\\n nonbonded_force_terms[k][at2].parameters[\"charge\"].value\n\n nonbonded_force.setExceptionParameters(i, at1, at2, charge_prod, sigma, epsilon)\n\n # Update parameters in context\n nonbonded_force.updateParametersInContext(self.context)\n\n elif \"NonbondedForce\" not in force_field_optimizable and \"Scaling14\" in force_field_optimizable:\n # If updating the 1-4 scaling factors but not the non bonded force\n\n scaling_constants = force_field_optimizable[\"Scaling14\"]\n\n for k, nonbonded_force_index in enumerate(self.forces_indexes[\"NonbondedForce\"]):\n nonbonded_force = self.system.getForce(nonbonded_force_index)\n\n for i in range(len(scaling_constants[k])):\n at1, at2, _, sigma, _ = nonbonded_force.getExceptionParameters(i)\n chg1, sigma1, eps1 = nonbonded_force.getParticleParameters(at1)\n chg2, sigma2, eps2 = nonbonded_force.getParticleParameters(at2)\n scee_local = scaling_constants[k][i].parameters['scee'].value\n scnb_local = scaling_constants[k][i].parameters['scnb'].value\n\n scee_local = abs(scee_local)\n scnb_local = abs(scnb_local)\n\n # Lorentz-Berthelot rules:\n # \\epsilon_{ij} = \\sqrt{\\epsilon_{ii} * \\epsilon_{jj}}\n epsilon = scnb_local * np.sqrt(eps1*eps2)\n\n # scee*q1*q2\n charge_prod = scee_local * chg1 * chg2\n\n nonbonded_force.setExceptionParameters(i, at1, at2, charge_prod, sigma, epsilon)\n\n # Update parameters in context\n nonbonded_force.updateParametersInContext(self.context)\n else:\n # Not updating any non bonded parameter\n pass\n\n return self.context\n\n # ------------------------------------------------------------ #\n # #\n # MOLECULAR DYNAMICS METHODS #\n # #\n # ------------------------------------------------------------ #\n def set_positions(self, positions):\n \"\"\"\n Method that sets the Context positions.\n\n Parameters\n ----------\n positions : np.array\n Array containing the positions.\n \"\"\"\n assert self.context is not None, \"OpenMM context was not set.\"\n\n return self.context.setPositions(positions)\n\n def set_velocities(self, velocities):\n \"\"\"\n Method that sets the Context positions.\n\n Parameters\n ----------\n velocities : np.array\n Array containing the velocities.\n \"\"\"\n assert self.context is not None, \"OpenMM context was not set.\"\n\n return self.context.setVelocities(velocities)\n\n def get_positions(self):\n \"\"\"\n Method that gets the Context positions.\n\n Returns\n ----------\n positions : np.array\n Array containing the positions.\n \"\"\"\n assert self.context is not None, \"OpenMM context was not set.\"\n\n return self.context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(asNumpy=True)\n\n def get_velocities(self):\n \"\"\"\n Method that gets the Context velocities.\n\n Returns\n ----------\n velocities : np.array\n Array containing the velocities.\n \"\"\"\n assert self.context is not None, \"OpenMM context was not set.\"\n\n return self.context.getState(getVelocities=True).getVelocities(asNumpy=True)\n\n def generate_maxwell_boltzmann_velocities(self, temperature):\n \"\"\"\n Generate random velocities for the solute.\n\n \"\"\"\n assert self.masses_list is not None\n assert self.n_atoms is 
not None\n\n # Initiate array\n vel = unit.Quantity(np.zeros([self.n_atoms, 3], np.float64), unit.nanometer / unit.picosecond) # velocities[i,k] is the kth component of the velocity of atom i\n kT = temperature * unit.BOLTZMANN_CONSTANT_kB\n kT = kT.in_units_of(unit.kilogram * unit.meter*unit.meter / (unit.second*unit.second))\n\n # Assign velocities from the Maxwell-Boltzmann distribution.\n for atom_index in range(self.n_atoms):\n mass = self.masses_list[atom_index]\n if mass._value > 1e-8:\n mass = unit.Quantity(mass._value * 1.66054e-27, unit.kilogram)\n # Standard deviation of velocity distribution for each coordinate for this atom\n sigma = unit.sqrt(kT / mass)\n else:\n sigma = 0.0 * unit.nanometer / unit.picosecond\n\n for k in range(3):\n # 0.001 is to take into account the ns / ps\n vel[atom_index, k] = (sigma * np.random.standard_normal())\n\n return vel.in_units_of(unit.nanometer / unit.picosecond)\n\n # ------------------------------------------------------------ #\n # #\n # PRIVATE METHODS #\n # #\n # ------------------------------------------------------------ #\n def _set_force_groups(self):\n \"\"\"\n Method that sets the force groups of all forces present in the system accordingly to the mapping defined in the forces_groups_dict dictionary.\n\n Returns\n -------\n force_groups : list of int\n List containing all the force groups present in the system.\n \"\"\"\n\n self.force_groups = []\n self.forces_indexes = {}\n\n force_idx = 0\n forces = self.system.getForces()\n for force in forces:\n # Get force group name\n force_key = force.__class__.__name__\n # Set force group number\n force.setForceGroup(self.force_groups_dict[force_key])\n # Get force group number\n self.force_groups.append(force.getForceGroup())\n\n if force_key not in self.forces_indexes:\n self.forces_indexes[force_key] = []\n\n self.forces_indexes[force_key].append(force_idx)\n force_idx += 1\n\n return self.force_groups\n"
] |
[
[
"numpy.sign",
"numpy.random.standard_normal",
"numpy.zeros",
"numpy.sqrt"
]
] |
m090009/Advanced_Lane_line_detection
|
[
"2967e7156e178eea305c205474d5a00887655840"
] |
[
"datasetclasses.py"
] |
[
"import utils\nfrom collections import Counter\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\n\nclass Dataset:\n def __init__(self,\n features,\n labels):\n # Splitting the data to 80% training and 20% validation\n if features:\n X_train, X_valid, y_train, y_valid = train_test_split(features, labels, test_size=0.2)\n self.train = DatasetElements(X_train, y_train)\n self.valid = DatasetElements(X_valid, y_valid)\n # self.test = DatasetElements(X_test, y_test)\n # self.label_mapping = self.get_label_mapping(label_mapping_filename)\n\n # def get_label_mapping(self, file_path):\n # return utils.read_csv(file_path=file_path)\n\n def add_testing_data(self, X_test, y_test):\n self.test = DatasetElements(X_test, y_test)\n\n\nclass DatasetElements:\n def __init__(self, X_data, y_data):\n self.X = X_data\n self.y = y_data\n\n @property\n def label_counter(self):\n # Create a counter to count the occurences of a sign (label)\n # values = list(set(self.y))\n # data_counter = Counter(values)\n # # We count each label occurence and store it in our label_counter\n # for label in self.y:\n # data_counter[label] += 1\n return utils.get_data_count(self.X, self.y)\n\n @property\n def len(self):\n return 0 if len(self.X) != len(self.y) else len(self.X)\n\n # def mapping(self):\n # return self.label_mapping[str(self.y)]\n\n def shuffle_data(self):\n self.X, self.y = shuffle(self.X, self.y)\n\n # def preprocess(self, normalize, equalize, grayscale):\n # return imageutils.preprocess_images(self.X,\n # grayscale=grayscale,\n # equalize=equalize,\n # normalize=True)\n"
] |
[
[
"sklearn.utils.shuffle",
"sklearn.model_selection.train_test_split"
]
] |
hsvgbkhgbv/ConvLab
|
[
"aab9e3f91605015d0d6a52e85a53d1d3a92ab137"
] |
[
"convlab/agent/algorithm/ddq.py"
] |
[
"# Modified by Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom convlab.agent import memory\nfrom convlab.agent import net\nfrom convlab.agent.algorithm.dqn import DQN\nfrom convlab.agent.net import net_util\nfrom convlab.lib import logger, util\nfrom convlab.lib.decorator import lab_api\n\nlogger = logger.get_logger(__name__)\n\n\nclass DDQ(DQN):\n '''\n Implementation of a simple DDQ algorithm.\n '''\n\n @lab_api\n def init_algorithm_params(self):\n # set default\n util.set_attr(self, dict(\n action_pdtype='Argmax',\n action_policy='epsilon_greedy',\n explore_var_spec=None,\n ))\n util.set_attr(self, self.algorithm_spec, [\n 'action_pdtype',\n 'action_policy',\n # explore_var is epsilon, tau or etc. depending on the action policy\n # these control the trade off between exploration and exploitaton\n 'explore_var_spec',\n 'gamma', # the discount factor\n 'training_batch_iter', # how many gradient updates per batch\n 'training_iter', # how many batches to train each time\n 'training_frequency', # how often to train (once a few timesteps)\n 'training_start_step', # how long before starting training\n 'planning_steps', # how many more step for planning\n ])\n super().init_algorithm_params()\n\n @lab_api\n def init_nets(self, global_nets=None):\n '''Initialize the neural network used to learn the Q function from the spec'''\n\n # [model-based] init world_model_nets\n self.state_dim = self.body.state_dim\n self.action_dim = net_util.get_out_dim(self.body)\n in_dim = [self.state_dim, self.action_dim]\n out_dim = [self.state_dim, 1, 1]\n \n WorldNetClass = getattr(net, self.world_net_spec['type'])\n self.world_net = WorldNetClass(self.world_net_spec,in_dim, out_dim)\n self.world_net_names = ['world_net']\n self.world_optim = net_util.get_optim(self.world_net, self.world_net.optim_spec)\n self.world_lr_scheduler = net_util.get_lr_scheduler(self.world_optim, self.world_net.lr_scheduler_spec)\n \n print(self.world_net)\n\n # initialize policy net \n super().init_nets(global_nets)\n\n @lab_api\n def train(self):\n '''\n Completes one training step for the agent if it is time to train.\n i.e. 
the environment timestep is greater than the minimum training timestep and a multiple of the training_frequency.\n Each training step consists of sampling n batches from the agent's memory.\n For each of the batches, the target Q values (q_targets) are computed and a single training step is taken k times\n Otherwise this function does nothing.\n '''\n if util.in_eval_lab_modes():\n return np.nan\n clock = self.body.env.clock\n if self.to_train == 1:\n total_loss = torch.tensor(0.0)\n for _ in range(self.training_iter):\n batch = self.sample()\n clock.set_batch_size(len(batch))\n for _ in range(self.training_batch_iter):\n loss = self.calc_q_loss(batch)\n self.net.train_step(loss, self.optim, self.lr_scheduler, clock=clock, global_net=self.global_net)\n total_loss += loss\n\n # [model-based]: train world_model on real data\n self.train_world_model(batch)\n\n # [model-based]: plan more steps with world_model\n for _ in range(self.planning_steps):\n for _ in range(self.training_iter):\n batch = self.sample()\n clock.set_batch_size(len(batch))\n for _ in range(self.training_batch_iter):\n fake_batch = self.planning(batch)\n loss = self.calc_q_loss(fake_batch) # this also inluences the priority in memory\n self.net.train_step(loss, self.optim, self.lr_scheduler, clock=clock, global_net=self.global_net)\n \n # reset\n self.to_train = 0\n logger.debug(f'Trained {self.name} at epi: {clock.epi}, frame: {clock.frame}, t: {clock.t}, total_reward so far: {self.body.total_reward}, loss: {loss:g}')\n return loss.item()\n else:\n return np.nan\n\n\n def train_world_model(self, batch):\n # zero_grad\n self.world_optim.zero_grad()\n\n # get predictions\n states_raw = batch[\"states\"]\n actions_raw = batch[\"actions\"]\n states = states_raw\n actions = F.one_hot(actions_raw.long(), self.action_dim).float()\n next_states, rewards, dones = self.world_net([states, actions])\n rewards = rewards.view(-1)\n dones = dones.view(-1)\n\n # compute loss\n loss_func_state = torch.nn.BCEWithLogitsLoss()\n loss_s = loss_func_state(next_states, batch[\"next_states\"])\n loss_func_reward = torch.nn.MSELoss()\n loss_r = loss_func_reward(rewards, batch[\"rewards\"])\n loss_func_terminal = torch.nn.BCEWithLogitsLoss()\n loss_t = loss_func_terminal(dones, batch[\"dones\"])\n loss = loss_s + loss_r + loss_t\n\n # update\n loss.backward()\n self.world_optim.step()\n\n def planning(self, batch):\n # get predictions\n states_raw = batch[\"states\"]\n actions_raw = batch[\"actions\"]\n states = states_raw\n actions = F.one_hot(actions_raw.long(), self.action_dim).float()\n next_states, rewards, dones = self.world_net([states, actions])\n rewards = rewards.view(-1)\n dones = dones.view(-1)\n\n # sample next_states/dones to [0,1]\n m = torch.distributions.Bernoulli(torch.sigmoid(next_states))\n next_states = m.sample()\n m = torch.distributions.Bernoulli(torch.sigmoid(dones))\n dones = m.sample()\n\n # create new batch\n new_batch = {}\n new_batch[\"states\"] = states_raw\n new_batch[\"next_states\"] = next_states\n new_batch[\"actions\"] = actions_raw\n new_batch[\"rewards\"] = rewards\n new_batch[\"dones\"] = dones\n return new_batch\n"
] |
[
[
"torch.sigmoid",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.MSELoss",
"torch.tensor"
]
] |
jim22k/grblas
|
[
"7c96ddd3cb485f2c98637e7fb96070ff6c48d219"
] |
[
"graphblas/monoid/numpy.py"
] |
[
"\"\"\" Create UDFs of numpy functions supported by numba.\n\nSee list of numpy ufuncs supported by numpy here:\n\nhttps://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations\n\n\"\"\"\nimport numpy as _np\n\nfrom .. import _STANDARD_OPERATOR_NAMES\nfrom .. import binary as _binary\nfrom .. import config as _config\nfrom .. import monoid as _monoid\nfrom ..dtypes import _supports_complex\n\n_delayed = {}\n_complex_dtypes = {\"FC32\", \"FC64\"}\n_float_dtypes = {\"FP32\", \"FP64\"}\n_int_dtypes = {\"INT8\", \"UINT8\", \"INT16\", \"UINT16\", \"INT32\", \"UINT32\", \"INT64\", \"UINT64\"}\n_bool_int_dtypes = _int_dtypes | {\"BOOL\"}\n\n_monoid_identities = {\n # Math operations\n \"add\": 0,\n \"multiply\": 1,\n \"logaddexp\": dict.fromkeys(_float_dtypes, -_np.inf),\n \"logaddexp2\": dict.fromkeys(_float_dtypes, -_np.inf),\n \"gcd\": dict.fromkeys(_int_dtypes, 0),\n # Trigonometric functions\n \"hypot\": dict.fromkeys(_float_dtypes, 0.0),\n # Bit-twiddling functions\n \"bitwise_and\": {dtype: True if dtype == \"BOOL\" else -1 for dtype in _bool_int_dtypes},\n \"bitwise_or\": dict.fromkeys(_bool_int_dtypes, 0),\n \"bitwise_xor\": dict.fromkeys(_bool_int_dtypes, 0),\n # Comparison functions\n \"equal\": {\"BOOL\": True},\n \"logical_and\": {\"BOOL\": True},\n \"logical_or\": {\"BOOL\": True},\n \"logical_xor\": {\"BOOL\": False},\n \"maximum\": {\n \"BOOL\": False,\n \"INT8\": _np.iinfo(_np.int8).min,\n \"UINT8\": 0,\n \"INT16\": _np.iinfo(_np.int16).min,\n \"UINT16\": 0,\n \"INT32\": _np.iinfo(_np.int32).min,\n \"UINT32\": 0,\n \"INT64\": _np.iinfo(_np.int64).min,\n \"UINT64\": 0,\n \"FP32\": -_np.inf,\n \"FP64\": -_np.inf,\n },\n \"minimum\": {\n \"BOOL\": True,\n \"INT8\": _np.iinfo(_np.int8).max,\n \"UINT8\": _np.iinfo(_np.uint8).max,\n \"INT16\": _np.iinfo(_np.int16).max,\n \"UINT16\": _np.iinfo(_np.uint16).max,\n \"INT32\": _np.iinfo(_np.int32).max,\n \"UINT32\": _np.iinfo(_np.uint32).max,\n \"INT64\": _np.iinfo(_np.int64).max,\n \"UINT64\": _np.iinfo(_np.uint64).max,\n \"FP32\": _np.inf,\n \"FP64\": _np.inf,\n },\n \"fmax\": {\n \"BOOL\": False,\n \"INT8\": _np.iinfo(_np.int8).min,\n \"UINT8\": 0,\n \"INT16\": _np.iinfo(_np.int8).min,\n \"UINT16\": 0,\n \"INT32\": _np.iinfo(_np.int8).min,\n \"UINT32\": 0,\n \"INT64\": _np.iinfo(_np.int8).min,\n \"UINT64\": 0,\n \"FP32\": -_np.inf, # or _np.nan?\n \"FP64\": -_np.inf, # or _np.nan?\n },\n \"fmin\": {\n \"BOOL\": True,\n \"INT8\": _np.iinfo(_np.int8).max,\n \"UINT8\": _np.iinfo(_np.uint8).max,\n \"INT16\": _np.iinfo(_np.int16).max,\n \"UINT16\": _np.iinfo(_np.uint16).max,\n \"INT32\": _np.iinfo(_np.int32).max,\n \"UINT32\": _np.iinfo(_np.uint32).max,\n \"INT64\": _np.iinfo(_np.int64).max,\n \"UINT64\": _np.iinfo(_np.uint64).max,\n \"FP32\": _np.inf, # or _np.nan?\n \"FP64\": _np.inf, # or _np.nan?\n },\n}\nif _supports_complex:\n _monoid_identities[\"fmax\"].update(dict.fromkeys(_complex_dtypes, complex(-_np.inf, -_np.inf)))\n _monoid_identities[\"fmin\"].update(dict.fromkeys(_complex_dtypes, complex(_np.inf, _np.inf)))\n _monoid_identities[\"maximum\"].update(\n dict.fromkeys(_complex_dtypes, complex(-_np.inf, -_np.inf))\n )\n _monoid_identities[\"minimum\"].update(dict.fromkeys(_complex_dtypes, complex(_np.inf, _np.inf)))\n\n_STANDARD_OPERATOR_NAMES.update(f\"monoid.numpy.{name}\" for name in _monoid_identities)\n__all__ = list(_monoid_identities)\n_numpy_to_graphblas = {\n \"add\": \"plus\",\n \"bitwise_and\": \"band\",\n \"bitwise_or\": \"bor\",\n \"bitwise_xor\": \"bxor\",\n \"equal\": \"eq\",\n \"fmax\": 
\"max\", # ignores nan\n \"fmin\": \"min\", # ignores nan\n \"logical_and\": \"land\",\n \"logical_or\": \"lor\",\n \"logical_xor\": \"lxor\",\n \"multiply\": \"times\",\n}\n# _graphblas_to_numpy = {val: key for key, val in _numpy_to_graphblas.items()} # Soon...\n# Not included: maximum, minimum, gcd, hypot, logaddexp, logaddexp2\n\n\ndef __dir__():\n return globals().keys() | _delayed.keys() | _monoid_identities.keys()\n\n\ndef __getattr__(name):\n if name in _delayed:\n func, kwargs = _delayed.pop(name)\n if type(kwargs[\"binaryop\"]) is str:\n from ..binary import from_string\n\n kwargs[\"binaryop\"] = from_string(kwargs[\"binaryop\"])\n rv = func(**kwargs)\n globals()[name] = rv\n return rv\n if name not in _monoid_identities:\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n if _config.get(\"mapnumpy\") and name in _numpy_to_graphblas:\n globals()[name] = getattr(_monoid, _numpy_to_graphblas[name])\n else:\n from .. import operator\n\n func = getattr(_binary.numpy, name)\n operator.Monoid.register_new(f\"numpy.{name}\", func, _monoid_identities[name])\n return globals()[name]\n"
] |
[
[
"numpy.iinfo"
]
] |
axelande/scikit-fuzzy
|
[
"d9bac7a53d8e64f69f71b0fb4da2c8921b6aa7d0"
] |
[
"docs/tools/plot_pr.py"
] |
[
"import urllib\nimport json\nimport copy\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\n\nimport dateutil.parser\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime, timedelta\n\ncache = '_pr_cache.txt'\n\n# Obtain release dates using, e.g.,\n#\n# git log v0.4 -n 1 --format='%ai'\n#\n# The first two releases are commented out.\n# This was in the era before PRs.\n#\nreleases = OrderedDict([\n #('0.1', u'2009-10-07 13:52:19 +0200'),\n #('0.2', u'2009-11-12 14:48:45 +0200'),\n ('0.3', u'2011-10-10 03:28:47 -0700'),\n ('0.4', u'2011-12-03 14:31:32 -0800'),\n ('0.5', u'2012-02-26 21:00:51 -0800'),\n ('0.6', u'2012-06-24 21:37:05 -0700')])\n\n\nmonth_duration = 24\n\nfor r in releases:\n releases[r] = dateutil.parser.parse(releases[r])\n\ndef fetch_PRs(user='scikit-image', repo='scikit-image', state='open'):\n params = {'state': state,\n 'per_page': 100,\n 'page': 1}\n\n data = []\n page_data = True\n\n while page_data:\n config = {'user': user,\n 'repo': repo,\n 'params': urllib.urlencode(params)}\n\n fetch_status = 'Fetching page %(page)d (state=%(state)s)' % params + \\\n ' from %(user)s/%(repo)s...' % config\n print(fetch_status)\n\n f = urllib.urlopen(\n 'https://api.github.com/repos/%(user)s/%(repo)s/pulls?%(params)s' \\\n % config\n )\n\n params['page'] += 1\n\n page_data = json.loads(f.read())\n\n if 'message' in page_data and page_data['message'] == \"Not Found\":\n page_data = []\n print('Warning: Repo not found (%(user)s/%(repo)s)' % config)\n else:\n data.extend(page_data)\n\n return data\n\ntry:\n PRs = json.loads(open(cache, 'r').read())\n print('Loaded PRs from cache...')\n\nexcept IOError:\n PRs = fetch_PRs(user='stefanv', repo='scikits.image', state='closed')\n PRs.extend(fetch_PRs(state='open'))\n PRs.extend(fetch_PRs(state='closed'))\n\n cf = open(cache, 'w')\n cf.write(json.dumps(PRs))\n cf.flush()\n\nnrs = [pr['number'] for pr in PRs]\nprint('Processing %d pull requests...' % len(nrs))\n\ndates = [dateutil.parser.parse(pr['created_at']) for pr in PRs]\n\nepoch = datetime(2009, 1, 1, tzinfo=dates[0].tzinfo)\n\ndef seconds_from_epoch(dates):\n seconds = [(dt - epoch).total_seconds() for dt in dates]\n return seconds\n\ndates_f = seconds_from_epoch(dates)\n\ndef date_formatter(value, _):\n dt = epoch + timedelta(seconds=value)\n return dt.strftime('%Y/%m')\n\nplt.figure(figsize=(7, 5))\n\nnow = datetime.now(tz=dates[0].tzinfo)\nthis_month = datetime(year=now.year, month=now.month, day=1,\n tzinfo=dates[0].tzinfo)\n\nbins = [this_month - relativedelta(months=i) \\\n for i in reversed(range(-1, month_duration))]\nbins = seconds_from_epoch(bins)\nplt.hist(dates_f, bins=bins)\n\nax = plt.gca()\nax.xaxis.set_major_formatter(FuncFormatter(date_formatter))\nax.set_xticks(bins[:-1])\n\nlabels = ax.get_xticklabels()\nfor l in labels:\n l.set_rotation(40)\n l.set_size(10)\n\n\nfor version, date in releases.items():\n date = seconds_from_epoch([date])[0]\n plt.axvline(date, color='r', label=version)\n\nplt.title('Pull request activity').set_y(1.05)\nplt.xlabel('Date')\nplt.ylabel('PRs created')\nplt.legend(loc=2, title='Release')\nplt.subplots_adjust(top=0.875, bottom=0.225)\n\nplt.savefig('PRs.png')\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
dluvizon/3d-pose-consensus
|
[
"7a829d5713d2c45c6b265c9886add0b69e0050a8"
] |
[
"people/datasets/human36m.py"
] |
[
"import os\n\nimport json\nimport numpy as np\nimport scipy.io as sio\nfrom PIL import Image\n\nfrom .generic import GenericDataset\nfrom .generic import project_gt_poses_to_anchors\nfrom .generic import compute_anchors_reference\nfrom .generic import compute_window_reference\nfrom ..utils import *\n\nACTION_LABELS = None\nZBOUND = np.array([2378.56192888, 7916.5468051])\nMAX_Z = 8000\nBBOX_REF = 2000\n\n\ndef load_h36m_mat_annotation(filename):\n mat = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n\n # Respect the order of TEST (0), TRAIN (1), and VALID (2)\n sequences = [mat['sequences_te'], mat['sequences_tr'], mat['sequences_val']]\n action_labels = mat['action_labels']\n joint_labels = mat['joint_labels']\n\n return sequences, action_labels, joint_labels\n\n\ndef load_h36m_mat_calib_test(filename):\n mat = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n\n sequences = mat['sequences_te']\n action_labels = mat['action_labels']\n joint_labels = mat['joint_labels']\n\n return sequences, action_labels, joint_labels\n\n\ndef serialize_index_sequences(seq):\n frames_idx = []\n for s in range(len(seq)):\n for f in range(len(seq[s].frames)):\n frames_idx.append((s, f))\n\n return frames_idx\n\n\ndef parse_json_bbox(fname):\n with open(fname, 'r') as fid:\n data = json.load(fid)\n bbox_pred = np.zeros((len(data), 4))\n for i in range(len(data)):\n p = data['%d' % i]\n if len(p) == 0:\n p = [[200, 200, 800, 800]]\n\n obj, win = bbox_to_objposwin(p[0], square=True)\n bbox_pred[i] = objposwin_to_bbox(obj, 1.25*win)\n\n return bbox_pred\n\n\n\nclass Human36M(GenericDataset):\n \"\"\"Implementation of the Human3.6M dataset for 3D pose estimation, for\n training and validation splits.\n \"\"\"\n\n def __init__(self, dataset_path, dataconf,\n bbox_crop_mode='3d',\n remove_outer_joints=True,\n preprocess_mode='tf',\n recompute_zbound=False,\n zbound=None,\n bbox_file_train=None,\n bbox_file_val=None,\n pose_pred_train=None,\n pose_pred_val=None):\n \"\"\"Instanciates the class Human36M.\n\n How the bounding box is cropped:\n First, it checks for pose predictions (given by\n pose_pred_train/val). If available, it is used to crop the 3D\n bounding box. If not, it checks for the files bbox_file_train/val.\n If the file was given, it is used on validation. On training, the\n given bboxes are used with probability `bbox_ratio_train`, and\n ground truth 3D bounding boxes are used otherwise. If bbox_files\n are not given, only ground truth 3D bboxes are used.\n\n # Arguments\n\n dataset_path: string. Path to the Human3.6M dataset.\n dataconf: object DataConfig.\n bbox_crop_mode: string '2d', '3d' or None. Define the mode to crop\n each frame, considering the 2d points in the image plane, the\n 3d (u-v plus depth) for a 2^3 meters cube, or None for full\n frame.\n remove_outer_joints: boolean. Remove outer body joints (from the\n bounding box) or not.\n preprocess_mode: string.\n recompute_zbound: boolean. If True, recompute the bounding limits\n for absolute z.\n zbound: list of floats. Minimum and maximim values for absolute z.\n bbox_file_train and bbox_file_val: strings. Path to train/val json\n files containing 2d bounding boxes. It is replaced by the\n prediction pose if that is given.\n pose_pred_train and pose_pred_val: string or None. Path to a numpy\n file with previous pose predictions. These predictions, if\n geven, will be used to crop the bounding boxes. 
When not given,\n bounding boxes are cropped based on the ground truth poses.\n Predictions should be in the UVD format.\n \"\"\"\n GenericDataset.__init__(self,\n dataset_path,\n dataconf,\n poselayout=pa17j3d,\n remove_outer_joints=remove_outer_joints,\n preprocess_mode=preprocess_mode)\n\n self.bbox_crop_mode = bbox_crop_mode\n self.bbox_pred = 3*[None]\n self.pose_pred = 3*[None]\n\n\n if bbox_file_train is not None:\n self.bbox_pred[TRAIN_MODE] = parse_json_bbox(bbox_file_train)\n\n if bbox_file_val is not None:\n self.bbox_pred[VALID_MODE] = parse_json_bbox(bbox_file_val)\n\n if pose_pred_train is not None:\n self.pose_pred[TRAIN_MODE] = np.load(pose_pred_train)\n\n if pose_pred_val is not None:\n self.pose_pred[VALID_MODE] = np.load(pose_pred_val)\n\n self._load_annotations(os.path.join(dataset_path, 'annotations.mat'),\n recompute_zbound, zbound=zbound)\n\n\n def _load_annotations(self, filename, recompute_zbound, zbound=None):\n try:\n self.sequences, self.action_labels, self.joint_labels = \\\n load_h36m_mat_annotation(filename)\n self.frame_idx = [serialize_index_sequences(self.sequences[0]),\n serialize_index_sequences(self.sequences[1]),\n serialize_index_sequences(self.sequences[2])]\n\n global ACTION_LABELS\n ACTION_LABELS = self.action_labels\n\n if recompute_zbound:\n zarray = np.zeros((len(self.frame_idx[TRAIN_MODE]),))\n idx = 0\n warning('Recomputing Z-boundary for Human3.6M!')\n for seq in self.sequences[TRAIN_MODE]:\n cpar = seq.camera_parameters\n cam = Camera(cpar.R, cpar.T, cpar.f, cpar.c, cpar.p, cpar.k)\n pw = self.load_sequence_pose_annot(seq.frames)\n roots = cam.project(pw[:, 0, :])\n zarray[idx:idx+len(roots)] = roots[:, 2] / np.mean(cam.f)\n idx += len(roots)\n zarray[zarray < -1e6] = np.nan\n avg = np.nanmean(zarray)\n zmax = np.nanmax(zarray)\n zmin = np.nanmin(zarray)\n warning('avg {}, max {}, min {}'.format(avg, zmax, zmin))\n margin = 1.05 * max(avg - zmin, zmax - avg)\n self.zbound = np.array([avg - margin, avg + margin])\n warning('zbound: {}'.format(self.zbound))\n\n elif zbound is not None:\n self.zbound = zbound\n\n else:\n self.zbound = ZBOUND\n\n except:\n warning('Error loading Human3.6M dataset!')\n raise\n\n def get_meta(self, key, mode):\n seq_idx, frame_idx = self.frame_idx[mode][key]\n seq = self.sequences[mode][seq_idx]\n a = int(seq.name[1:3])\n s = int(seq.name[5:7])\n e = int(seq.name[9:11])\n c = int(seq.name[13:15])\n f = seq.frames[frame_idx].f\n\n return (a, s, e, c, f)\n\n\n def get_data(self, key, mode, frame_list=None):\n pl = self.poselayout # alias for poselayout\n output = {}\n\n if mode == TRAIN_MODE:\n dconf = self.dataconf.random_data_generator()\n random_clip = True\n else:\n dconf = self.dataconf.get_fixed_config()\n random_clip = False\n\n seq_idx, frame_idx = self.frame_idx[mode][key]\n seq = self.sequences[mode][seq_idx]\n objframe = seq.frames[frame_idx]\n\n \"\"\"Build a Camera object\"\"\"\n cpar = seq.camera_parameters\n cam = Camera(cpar.R, cpar.T, cpar.f, cpar.c, cpar.p, cpar.k)\n\n \"\"\"Load and project pose into the camera coordinates.\"\"\"\n pose_w = objframe.pose3d.T[h36m23j3d.map_from_h36m, 0:h36m23j3d.dim]\n pose_w = pose_w[h36m23j3d.map_to_pa17j]\n\n taux = T(None, img_size=(1, 1))\n taux.rotate_center(dconf['angle'])\n if dconf['hflip'] == 1:\n taux.horizontal_flip()\n\n pose_uvd = cam.project(pose_w, project_from_world=True)\n\n imgsize = (objframe.w, objframe.h)\n\n \"\"\"Compute bounding box.\"\"\"\n bbox_pred = pose_pred = None\n if self.bbox_pred[mode] is not None:\n bbox_pred = 
self.bbox_pred[mode][key]\n if self.pose_pred[mode] is not None:\n pose_pred = self.pose_pred[mode][key]\n\n objpos, winsize, zrange = auto_bbox_cropping(\n gt_pose_uvd=pose_uvd,\n focal=cam.f,\n box_size_mm=BBOX_REF,\n dconf=dconf,\n imgsize=imgsize,\n bbox_crop_mode=self.bbox_crop_mode,\n bbox_pred=bbox_pred,\n pose_pred=pose_pred,\n mode=mode)\n\n image = 'images.new/%s/%05d.jpg' % (seq.name, objframe.f)\n imgt = T(Image.open(os.path.join(self.dataset_path, image)))\n\n imgt.rotate_crop(dconf['angle'], objpos, winsize)\n if dconf['hflip'] == 1:\n imgt.horizontal_flip()\n\n imgt.resize(self.dataconf.crop_resolution)\n imgt.normalize_affinemap()\n imgframe = normalize_channels(imgt.asarray(),\n channel_power=dconf['chpower'], mode=self.preprocess_mode)\n\n if dconf['geoocclusion'] is not None:\n geo = dconf['geoocclusion']\n imgframe[geo[0]:geo[2], geo[1]:geo[3], :] = 0.\n\n \"\"\"Project pose to the full cropped region.\"\"\"\n tpose = np.empty(pose_uvd.shape)\n tpose[:, 0:2] = transform_2d_points(imgt.afmat, pose_uvd[:, 0:2],\n transpose=True)\n tpose[:, 2] = (pose_uvd[:, 2] - zrange[0]) / (zrange[1] - zrange[0])\n\n if imgt.hflip:\n tpose = tpose[pl.map_hflip, :]\n\n \"\"\"Set invalid values (-1e9).\"\"\"\n if self.remove_outer_joints:\n tpose[tpose < 0] = -1e9\n tpose[tpose > 1] = -1e9\n\n v = np.expand_dims(get_visible_joints(tpose[:, 0:2]), axis=-1)\n tpose = np.concatenate([tpose, v], axis=-1)\n\n \"\"\"Take the last transformation matrix, it should be the same for\n all frames.\n \"\"\"\n afmat = imgt.afmat.copy()\n output['afmat'] = afmat\n output['aref'] = compute_window_reference(afmat, imgsize)\n\n \"\"\"Convert the absolute Z to disparity\"\"\"\n rootz = pose_uvd[0:1, 2] / MAX_Z\n # rootz = np.mean(self.zbound) / \\\n # np.clip(pose_uvd[0:1, 2], self.zbound[0], self.zbound[1])\n\n output['camera'] = cam.serialize()\n output['action'] = int(seq.name[1:3]) - 1\n output['pose_w'] = pose_w\n output['pose_uvd'] = pose_uvd\n output['rootz'] = rootz\n output['hflip'] = np.array([imgt.hflip])\n output['pose'] = tpose\n output['frame'] = imgframe\n output['imgsize'] = np.array(imgsize)\n\n return output\n\n\n def load_sequence_pose_annot(self, frames):\n p = np.nan * np.ones((len(frames), h36m23j3d.num_joints, h36m23j3d.dim))\n\n for i in range(len(frames)):\n p[i, :] = frames[i].pose3d.T[h36m23j3d.map_from_h36m,\n 0:h36m23j3d.dim].copy()\n\n return p\n\n def get_shape(self, dictkey):\n if dictkey == 'frame':\n return self.dataconf.input_shape\n if dictkey == 'pose':\n return (self.poselayout.num_joints, self.poselayout.dim+1)\n if dictkey == 'pose_w':\n return (self.poselayout.num_joints, self.poselayout.dim)\n if dictkey == 'pose_uvd':\n return (self.poselayout.num_joints, self.poselayout.dim)\n if dictkey == 'rootz':\n return (1, )\n if dictkey == 'hflip':\n return (1, )\n if dictkey == 'aref':\n return (4,)\n if dictkey == 'action':\n return (1,)\n if dictkey == 'camera':\n return (21,)\n if dictkey == 'afmat':\n return (3, 3)\n if dictkey == 'imgsize':\n return (2,)\n raise Exception('Invalid dictkey `{}` on get_shape!'.format(dictkey))\n\n def get_length(self, mode):\n return len(self.frame_idx[mode])\n\n\nclass Human36MTest(GenericDataset):\n \"\"\"Implementation of the Human3.6M dataset for 3D pose estimation, testing\n samples.\n \"\"\"\n def __init__(self, dataset_path, dataconf,\n bbox_crop_mode='3d',\n preprocess_mode='tf',\n bbox_file_test=None,\n pose_pred_test=None):\n\n GenericDataset.__init__(self,\n dataset_path=dataset_path,\n dataconf=dataconf,\n poselayout=None,\n 
preprocess_mode=preprocess_mode)\n\n self.bbox_pred = None\n self.pose_pred = None\n\n if bbox_file_test is not None:\n self.bbox_pred = parse_json_bbox(bbox_file_test)\n\n self.bbox_crop_mode = bbox_crop_mode\n if pose_pred_test is not None:\n self.pose_pred = np.load(pose_pred_test)\n\n if self.bbox_crop_mode is not None:\n assert hasattr(self, 'pose_pred'), 'If using `bbox_crop_mode` ' \\\n + 'a valid `pose_pred_test` is required!'\n\n self._load_annotations(os.path.join(dataset_path, 'test_samples2.mat'))\n\n def _load_annotations(self, filename):\n try:\n self.sequences, self.action_labels, self.joint_labels = \\\n load_h36m_mat_calib_test(filename)\n\n frame_idx = []\n for s, seq in enumerate(self.sequences):\n for f in range(seq.num_frames):\n frame_idx.append((s, f))\n\n self.frame_idx = frame_idx\n\n global ACTION_LABELS\n ACTION_LABELS = self.action_labels\n\n self.zbound = ZBOUND\n\n except:\n warning('Error loading Human3.6M dataset!')\n raise\n\n def get_meta(self, key, mode=None):\n seq_idx, frame_idx = self.frame_idx[key]\n seq = self.sequences[seq_idx]\n a = int(seq.idname[1:3])\n s = int(seq.idname[5:7])\n e = int(seq.idname[9:11])\n c = int(seq.idname[13:15])\n f = frame_idx + 1\n\n return (a, s, e, c, f), seq\n\n def get_data(self, key, mode=None):\n output = {}\n\n dconf = self.dataconf.get_fixed_config()\n seq_idx, frame_idx = self.frame_idx[key]\n seq = self.sequences[seq_idx]\n\n filename = os.path.join(self.dataset_path,\n 'images.test', seq.sub, seq.name, '%05d.jpg' % (frame_idx + 1))\n imgt = T(Image.open(filename))\n imgsize = imgt.size\n\n cpar = seq.camera_parameters\n cam = Camera(cpar.R, cpar.T, cpar.f, cpar.c, cpar.p, cpar.k)\n\n bbox_pred = pose_pred = None\n if self.bbox_pred is not None:\n bbox_pred = self.bbox_pred[key]\n if self.pose_pred is not None:\n pose_pred = self.pose_pred[key]\n pose_pred = cam.project(self.pose_pred[key])\n\n objpos, winsize, zrange = auto_bbox_cropping(\n gt_pose_uvd=None,\n focal=cam.f,\n box_size_mm=BBOX_REF,\n dconf=dconf,\n imgsize=imgsize,\n bbox_crop_mode=self.bbox_crop_mode,\n bbox_pred=bbox_pred,\n pose_pred=pose_pred,\n mode=TEST_MODE)\n\n imgt.rotate_crop(dconf['angle'], objpos, winsize)\n if dconf['hflip'] == 1:\n imgt.horizontal_flip()\n\n imgt.resize(self.dataconf.crop_resolution)\n imgt.normalize_affinemap()\n\n afmat = imgt.afmat\n output['frame'] = normalize_channels(imgt.asarray(),\n channel_power=dconf['chpower'], mode=self.preprocess_mode)\n output['afmat'] = afmat.copy()\n output['aref'] = compute_anchors_reference(self.anchors, afmat, imgsize)\n output['camera'] = cam.serialize()\n output['imgsize'] = np.array(imgsize)\n\n return output\n\n def get_csv_filepath(self, key):\n seq_idx, frame_idx = self.frame_idx[key]\n seq = self.sequences[seq_idx]\n dpath = os.path.join(seq.sub, seq.name)\n fname = '%05d.csv' % (frame_idx + 1)\n\n return dpath, fname\n\n def get_shape(self, dictkey):\n if dictkey == 'frame':\n return self.dataconf.input_shape\n if dictkey == 'aref':\n return (self.num_anchors, 4)\n if dictkey == 'camera':\n return (21,)\n if dictkey == 'afmat':\n return (3, 3)\n if dictkey == 'imgsize':\n return (2,)\n raise Exception('Invalid dictkey `{}` on get_shape!'.format(dictkey))\n\n def get_length(self, mode=None):\n return len(self.frame_idx)\n\n"
] |
[
[
"numpy.nanmax",
"numpy.nanmin",
"scipy.io.loadmat",
"numpy.concatenate",
"numpy.mean",
"numpy.nanmean",
"numpy.load",
"numpy.array",
"numpy.empty"
]
] |
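The `apis` cell above lists the fully-qualified numpy/scipy calls that appear in the Human3.6M loader stored in the row's `code` cell. As a hedged illustration only (this is not the pipeline that produced this dataset), the sketch below shows one way such call names could be recovered from the unescaped source using Python's standard `ast` module; `human36m_source` is a hypothetical variable assumed to hold the unescaped contents of the `code` cell.

```python
import ast

def extract_api_calls(source):
    """Return fully-qualified call names (e.g. 'numpy.nanmean') found in source."""
    tree = ast.parse(source)

    # First pass: map local names to what they were imported as,
    # e.g. {'np': 'numpy', 'sio': 'scipy.io'}.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name.split(".")[0]] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                aliases[a.asname or a.name] = node.module + "." + a.name

    # Second pass: rebuild dotted call targets such as sio.loadmat or np.nanmean.
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            parts = []
            func = node.func
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name):
                parts.append(func.id)
                head, *rest = reversed(parts)
                if head in aliases:
                    calls.add(".".join([aliases[head]] + rest))
    return calls

# Hypothetical cross-check against the row's `apis` list:
# assert "scipy.io.loadmat" in extract_api_calls(human36m_source)
```

Under these assumptions the result can be compared as a subset check against the row's `apis` list; module-level calls such as `os.path.join` would also be reported and would simply be ignored in that comparison.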
Plan-Bee/planbee_yolov5
|
[
"9f77823284a8df3f3bc975552c00593b7079e7ef"
] |
[
"yolov5/utils/general.py"
] |
[
"# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nGeneral utils\n\"\"\"\n\nimport contextlib\nimport glob\nimport logging\nimport math\nimport os\nimport platform\nimport random\nimport re\nimport shutil\nimport signal\nimport time\nimport urllib\nfrom datetime import datetime\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom zipfile import ZipFile\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport pkg_resources as pkg\nimport torch\nimport torchvision\nimport yaml\n\nfrom utils.downloads import gsutil_getsize\nfrom utils.metrics import box_iou, fitness\n\n# Settings\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nDATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\n\ntorch.set_printoptions(linewidth=320, precision=5, profile='long')\nnp.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5\npd.options.display.max_columns = 10\ncv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)\nos.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads\nos.environ['OMP_NUM_THREADS'] = str(NUM_THREADS) # OpenMP max threads (PyTorch and SciPy)\n\n\ndef is_kaggle():\n # Is environment a Kaggle Notebook?\n try:\n assert os.environ.get('PWD') == '/kaggle/working'\n assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'\n return True\n except AssertionError:\n return False\n\n\ndef is_writeable(dir, test=False):\n # Return True if directory has write permissions, test opening a file with write permissions if test=True\n if test: # method 1\n file = Path(dir) / 'tmp.txt'\n try:\n with open(file, 'w'): # open file with write permissions\n pass\n file.unlink() # remove file\n return True\n except OSError:\n return False\n else: # method 2\n return os.access(dir, os.R_OK) # possible issues on Windows\n\n\ndef set_logging(name=None, verbose=VERBOSE):\n # Sets level and returns logger\n if is_kaggle():\n for h in logging.root.handlers:\n logging.root.removeHandler(h) # remove all handlers associated with the root logger object\n rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)\n return logging.getLogger(name)\n\n\nLOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.)\n\n\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n # Return path of user configuration directory. Prefer environment variable if exists. 
Make dir if required.\n env = os.getenv(env_var)\n if env:\n path = Path(env) # use environment variable\n else:\n cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs\n path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir\n path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable\n path.mkdir(exist_ok=True) # make if required\n return path\n\n\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\n\n\nclass Profile(contextlib.ContextDecorator):\n # Usage: @Profile() decorator or 'with Profile():' context manager\n def __enter__(self):\n self.start = time.time()\n\n def __exit__(self, type, value, traceback):\n print(f'Profile results: {time.time() - self.start:.5f}s')\n\n\nclass Timeout(contextlib.ContextDecorator):\n # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n self.seconds = int(seconds)\n self.timeout_message = timeout_msg\n self.suppress = bool(suppress_timeout_errors)\n\n def _timeout_handler(self, signum, frame):\n raise TimeoutError(self.timeout_message)\n\n def __enter__(self):\n if platform.system() != 'Windows': # not supported on Windows\n signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM\n signal.alarm(self.seconds) # start countdown for SIGALRM to be raised\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if platform.system() != 'Windows':\n signal.alarm(0) # Cancel SIGALRM if it's scheduled\n if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError\n return True\n\n\nclass WorkingDirectory(contextlib.ContextDecorator):\n # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager\n def __init__(self, new_dir):\n self.dir = new_dir # new dir\n self.cwd = Path.cwd().resolve() # current dir\n\n def __enter__(self):\n os.chdir(self.dir)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n os.chdir(self.cwd)\n\n\ndef try_except(func):\n # try-except function. Usage: @try_except decorator\n def handler(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except Exception as e:\n print(e)\n\n return handler\n\n\ndef methods(instance):\n # Get class/instance methods\n return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith(\"__\")]\n\n\ndef print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))\n\n\ndef init_seeds(seed=0):\n # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html\n # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible\n import torch.backends.cudnn as cudnn\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)\n\n\ndef intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef get_latest_run(search_dir='.'):\n # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from)\n last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)\n return max(last_list, key=os.path.getctime) if last_list else ''\n\n\ndef is_docker():\n # Is environment a Docker container?\n return Path('/workspace').exists() # or Path('/.dockerenv').exists()\n\n\ndef is_colab():\n # Is environment a Google Colab instance?\n try:\n import google.colab\n return True\n except ImportError:\n return False\n\n\ndef is_pip():\n # Is file in a pip package?\n return 'site-packages' in Path(__file__).resolve().parts\n\n\ndef is_ascii(s=''):\n # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)\n s = str(s) # convert list, tuple, None, etc. to str\n return len(s.encode().decode('ascii', 'ignore')) == len(s)\n\n\ndef is_chinese(s='人工智能'):\n # Is string composed of any Chinese characters?\n return True if re.search('[\\u4e00-\\u9fff]', str(s)) else False\n\n\ndef emojis(str=''):\n # Return platform-dependent emoji-safe version of string\n return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str\n\n\ndef file_age(path=__file__):\n # Return days since last file update\n dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta\n return dt.days # + dt.seconds / 86400 # fractional days\n\n\ndef file_update_date(path=__file__):\n # Return human-readable file modification date, i.e. '2021-3-26'\n t = datetime.fromtimestamp(Path(path).stat().st_mtime)\n return f'{t.year}-{t.month}-{t.day}'\n\n\ndef file_size(path):\n # Return file/dir size (MB)\n mb = 1 << 20 # bytes to MiB (1024 ** 2)\n path = Path(path)\n if path.is_file():\n return path.stat().st_size / mb\n elif path.is_dir():\n return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb\n else:\n return 0.0\n\n\ndef check_online():\n # Check internet connectivity\n import socket\n try:\n socket.create_connection((\"1.1.1.1\", 443), 5) # check host accessibility\n return True\n except OSError:\n return False\n\n\ndef git_describe(path=ROOT): # path must be a directory\n # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe\n try:\n return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1]\n except Exception:\n return ''\n\n\n@try_except\n@WorkingDirectory(ROOT)\ndef check_git_status():\n # Recommend 'git pull' if code is out of date\n msg = ', for updates see https://github.com/ultralytics/yolov5'\n s = colorstr('github: ') # string\n assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg\n assert not is_docker(), s + 'skipping check (Docker image)' + msg\n assert check_online(), s + 'skipping check (offline)' + msg\n\n cmd = 'git fetch && git config --get remote.origin.url'\n url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch\n branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out\n n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind\n if n > 0:\n s += f\"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update.\"\n else:\n s += f'up to date with {url} ✅'\n LOGGER.info(emojis(s)) # emoji-safe\n\n\ndef check_python(minimum='3.6.2'):\n # Check current python version vs. 
required python version\n check_version(platform.python_version(), minimum, name='Python ', hard=True)\n\n\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, s # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result\n\n\n@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n LOGGER.info(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n LOGGER.info(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n LOGGER.warning(f'{prefix} {e}')\n else:\n LOGGER.info(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n LOGGER.info(emojis(s))\n\n\ndef check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size\n\n\ndef check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False\n\n\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\n # Check file(s) for acceptable suffix\n if file and suffix:\n if isinstance(suffix, str):\n suffix = [suffix]\n for f in file if isinstance(file, (list, tuple)) else [file]:\n s = Path(f).suffix.lower() # file suffix\n if len(s):\n assert s in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"\n\n\ndef check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)\n\n\ndef check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n LOGGER.info(f'Found {url} locally at {file}') # file already exists\n else:\n LOGGER.info(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file\n\n\ndef check_font(font=FONT):\n # Download font to CONFIG_DIR if necessary\n font = Path(font)\n if not font.exists() and not (CONFIG_DIR / font.name).exists():\n url = \"https://ultralytics.com/assets/\" + font.name\n LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...')\n torch.hub.download_url_to_file(url, str(font), progress=False)\n\n\ndef check_dataset(data, autodownload=True):\n # Download and/or unzip dataset if not found locally\n # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip\n download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1)\n data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n with open(data, errors='ignore') as f:\n data = yaml.safe_load(f) # dictionary\n\n # Resolve paths\n path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'\n if not path.is_absolute():\n path = (ROOT / path).resolve()\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]\n\n # Parse yaml\n assert 'nc' in data, \"Dataset 'nc' key missing.\"\n if 'names' not in data:\n data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n LOGGER.info('\\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()])\n if s and autodownload: # download script\n root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n LOGGER.info(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(root).mkdir(parents=True, exist_ok=True) # create root\n ZipFile(f).extractall(path=root) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n LOGGER.info(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n LOGGER.info(f\"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\\n\")\n else:\n raise Exception('Dataset not found.')\n\n return data # dictionary\n\n\ndef url2file(url):\n # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt\n url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth\n return file\n\n\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):\n # Multi-threaded file download and unzip function, used in data.yaml for autodownload\n def download_one(url, dir):\n # Download 1 file\n f = dir / Path(url).name # filename\n if Path(url).is_file(): # exists in current path\n Path(url).rename(f) # move to dir\n elif not f.exists():\n LOGGER.info(f'Downloading {url} to {f}...')\n if curl:\n os.system(f\"curl -L '{url}' -o '{f}' --retry 9 -C -\") # curl download, retry and resume on fail\n else:\n torch.hub.download_url_to_file(url, f, progress=True) # torch download\n if unzip and f.suffix in ('.zip', '.gz'):\n LOGGER.info(f'Unzipping {f}...')\n if f.suffix == '.zip':\n ZipFile(f).extractall(path=dir) # unzip\n elif f.suffix == '.gz':\n os.system(f'tar xfz {f} --directory {f.parent}') # unzip\n if delete:\n f.unlink() # remove zip\n\n dir = Path(dir)\n dir.mkdir(parents=True, exist_ok=True) # make directory\n if threads > 1:\n pool = ThreadPool(threads)\n pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded\n pool.close()\n pool.join()\n else:\n for u in [url] if isinstance(url, (str, Path)) else url:\n download_one(u, dir)\n\n\ndef make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor\n\n\ndef clean_str(s):\n # Cleans a string by replacing special characters with underscore _\n return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)\n\n\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\n # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf\n return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1\n\n\ndef colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']\n\n\ndef labels_to_class_weights(labels, nc=80):\n # Get class weights (inverse frequency) from training labels\n if labels[0] is None: # no labels loaded\n return torch.Tensor()\n\n labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO\n classes = labels[:, 0].astype(np.int) # labels = [class xywh]\n weights = np.bincount(classes, minlength=nc) # occurrences per class\n\n # Prepend gridpoint count (for uCE training)\n # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image\n # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start\n\n weights[weights == 0] = 1 # replace empty bins with 1\n weights = 1 / weights # number of targets per class\n weights /= weights.sum() # normalize\n return torch.from_numpy(weights)\n\n\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\n # Produces image weights based on class_weights and image contents\n class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])\n image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)\n # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample\n return image_weights\n\n\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x\n\n\ndef xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y\n\n\ndef xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + 
x[:, 3] / 2 # bottom right y\n return y\n\n\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\n # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x\n y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y\n y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x\n y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y\n return y\n\n\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right\n if clip:\n clip_coords(x, (h - eps, w - eps)) # warning: inplace clip\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center\n y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center\n y[:, 2] = (x[:, 2] - x[:, 0]) / w # width\n y[:, 3] = (x[:, 3] - x[:, 1]) / h # height\n return y\n\n\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\n # Convert normalized segments into pixel segments, shape (n,2)\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = w * x[:, 0] + padw # top left x\n y[:, 1] = h * x[:, 1] + padh # top left y\n return y\n\n\ndef segment2box(segment, width=640, height=640):\n # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)\n x, y = segment.T # segment xy\n inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)\n x, y, = x[inside], y[inside]\n return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy\n\n\ndef segments2boxes(segments):\n # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh)\n boxes = []\n for s in segments:\n x, y = s.T # segment xy\n boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy\n return xyxy2xywh(np.array(boxes)) # cls, xywh\n\n\ndef resample_segments(segments, n=1000):\n # Up-sample an (n,2) segment\n for i, s in enumerate(segments):\n x = np.linspace(0, len(s) - 1, n)\n xp = np.arange(len(s))\n segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy\n return segments\n\n\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords\n\n\ndef clip_coords(boxes, shape):\n # Clip bounding xyxy bounding boxes to image shape (height, width)\n if isinstance(boxes, torch.Tensor): # faster individually\n boxes[:, 0].clamp_(0, shape[1]) # x1\n boxes[:, 1].clamp_(0, shape[0]) # y1\n boxes[:, 2].clamp_(0, shape[1]) # x2\n boxes[:, 3].clamp_(0, shape[0]) # y2\n else: # np.array (faster grouped)\n boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2\n boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2\n\n\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n lb = labels[xi]\n v = torch.zeros((len(lb), nc + 5), device=x.device)\n v[:, :4] = lb[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, 
None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output\n\n\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n LOGGER.info(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")\n\n\ndef print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\n evolve_csv = save_dir / 'evolve.csv'\n evolve_yaml = save_dir / 'hyp_evolve.yaml'\n keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',\n 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]\n keys = tuple(x.strip() for x in keys)\n vals = results + tuple(hyp.values())\n n = len(keys)\n\n # Download (optional)\n if bucket:\n url = f'gs://{bucket}/evolve.csv'\n if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):\n os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local\n\n # Log to evolve.csv\n s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\\n') # add header\n with open(evolve_csv, 'a') as f:\n f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\\n')\n\n # Save yaml\n with open(evolve_yaml, 'w') as f:\n data = pd.read_csv(evolve_csv)\n data = data.rename(columns=lambda x: x.strip()) # strip keys\n i = np.argmax(fitness(data.values[:, :4])) #\n generations = len(data)\n f.write('# YOLOv5 Hyperparameter Evolution Results\\n' +\n f'# Best generation: {i}\\n' +\n f'# Last generation: {generations - 1}\\n' +\n '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\\n' +\n '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\\n\\n')\n yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)\n\n # Print 
to screen\n LOGGER.info(prefix + f'{generations} generations finished, current result:\\n' +\n prefix + ', '.join(f'{x.strip():>20s}' for x in keys) + '\\n' +\n prefix + ', '.join(f'{x:20.5g}' for x in vals) + '\\n\\n')\n\n if bucket:\n os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload\n\n\ndef apply_classifier(x, model, img, im0):\n # Apply a second stage classifier to YOLO outputs\n # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()\n im0 = [im0] if isinstance(im0, np.ndarray) else im0\n for i, d in enumerate(x): # per image\n if d is not None and len(d):\n d = d.clone()\n\n # Reshape and pad cutouts\n b = xyxy2xywh(d[:, :4]) # boxes\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square\n b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad\n d[:, :4] = xywh2xyxy(b).long()\n\n # Rescale boxes from img_size to im0 size\n scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n # Classes\n pred_cls1 = d[:, 5].long()\n ims = []\n for j, a in enumerate(d): # per item\n cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n im = cv2.resize(cutout, (224, 224)) # BGR\n # cv2.imwrite('example%i.jpg' % j, cutout)\n\n im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32\n im /= 255 # 0 - 255 to 0.0 - 1.0\n ims.append(im)\n\n pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction\n x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections\n\n return x\n\n\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/denoise --> runs/denoise{sep}2, runs/denoise{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path\n\n\n# Variables\nNCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm\n"
] |
[
[
"torch.zeros",
"torch.cat",
"numpy.concatenate",
"torch.device",
"torch.save",
"pandas.read_csv",
"torch.mm",
"torch.from_numpy",
"torch.tensor",
"numpy.copy",
"numpy.interp",
"numpy.zeros",
"numpy.ascontiguousarray",
"torch.set_printoptions",
"torch.hub.download_url_to_file",
"numpy.array",
"numpy.random.seed",
"torch.Tensor",
"torch.manual_seed",
"numpy.set_printoptions",
"numpy.ones",
"numpy.bincount"
]
] |
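The `code` cell above reproduces `yolov5/utils/general.py`, whose box-format helpers `xywh2xyxy` and `xyxy2xywh` convert between center/size and corner coordinates. The short sketch below is a usage illustration, not an official Ultralytics example; it assumes the YOLOv5 repository root is on `PYTHONPATH` (with its dependencies installed) so that `utils.general` imports exactly as it does inside the repo.

```python
import torch
from utils.general import xywh2xyxy, xyxy2xywh

# One box: center (50, 40), width 20, height 10  ->  corners (40, 35, 60, 45)
boxes_xywh = torch.tensor([[50.0, 40.0, 20.0, 10.0]])
boxes_xyxy = xywh2xyxy(boxes_xywh)
print(boxes_xyxy)  # tensor([[40., 35., 60., 45.]])

# The two helpers are inverses of each other (up to floating-point error).
assert torch.allclose(xyxy2xywh(boxes_xyxy), boxes_xywh)
```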
ErasmusMC-Bioinformatics/tools-iuc
|
[
"2efdef3fbd1ee73c61547ff07a7da6a2b8269f6d"
] |
[
"tools/vsnp/vsnp_build_tables.py"
] |
[
"#!/usr/bin/env python\n\nimport argparse\nimport multiprocessing\nimport os\nimport queue\nimport re\n\nimport pandas\nimport pandas.io.formats.excel\nfrom Bio import SeqIO\n\n# Maximum columns allowed in a LibreOffice\n# spreadsheet is 1024. Excel allows for\n# 16,384 columns, but we'll set the lower\n# number as the maximum. Some browsers\n# (e.g., Firefox on Linux) are configured\n# to use LibreOffice for Excel spreadsheets.\nMAXCOLS = 1024\nOUTPUT_EXCEL_DIR = 'output_excel_dir'\nINPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'\nINPUT_JSON_DIR = 'input_json_dir'\nINPUT_NEWICK_DIR = 'input_newick_dir'\n\n\ndef annotate_table(table_df, group, annotation_dict):\n for gbk_chrome, pro in list(annotation_dict.items()):\n ref_pos = list(table_df)\n ref_series = pandas.Series(ref_pos)\n ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])\n all_ref = ref_df[ref_df['reference'] == gbk_chrome]\n positions = all_ref.position.to_frame()\n # Create an annotation file.\n annotation_file = \"%s_annotations.csv\" % group\n with open(annotation_file, \"a\") as fh:\n for _, row in positions.iterrows():\n pos = row.position\n try:\n aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]\n try:\n chrom, name, locus, tag = aaa.values[0]\n print(\"{}:{}\\t{}, {}, {}\".format(chrom, pos, locus, tag, name), file=fh)\n except ValueError:\n # If only one annotation for the entire\n # chromosome (e.g., flu) then having [0] fails\n chrom, name, locus, tag = aaa.values\n print(\"{}:{}\\t{}, {}, {}\".format(chrom, pos, locus, tag, name), file=fh)\n except KeyError:\n print(\"{}:{}\\tNo annotated product\".format(gbk_chrome, pos), file=fh)\n # Read the annotation file into a data frame.\n annotations_df = pandas.read_csv(annotation_file, sep='\\t', header=None, names=['index', 'annotations'], index_col='index')\n # Remove the annotation_file from disk since both\n # cascade and sort tables are built using the file,\n # and it is opened for writing in append mode.\n os.remove(annotation_file)\n # Process the data.\n table_df_transposed = table_df.T\n table_df_transposed.index = table_df_transposed.index.rename('index')\n table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)\n table_df = table_df_transposed.T\n return table_df\n\n\ndef excel_formatter(json_file_name, excel_file_name, group, annotation_dict):\n pandas.io.formats.excel.header_style = None\n table_df = pandas.read_json(json_file_name, orient='split')\n if annotation_dict is not None:\n table_df = annotate_table(table_df, group, annotation_dict)\n else:\n table_df = table_df.append(pandas.Series(name='no annotations'))\n writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')\n table_df.to_excel(writer, sheet_name='Sheet1')\n writer_book = writer.book\n ws = writer.sheets['Sheet1']\n format_a = writer_book.add_format({'bg_color': '#58FA82'})\n format_g = writer_book.add_format({'bg_color': '#F7FE2E'})\n format_c = writer_book.add_format({'bg_color': '#0000FF'})\n format_t = writer_book.add_format({'bg_color': '#FF0000'})\n format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})\n formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})\n format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})\n format_n = writer_book.add_format({'bg_color': '#E2CFDD'})\n rows, cols = table_df.shape\n ws.set_column(0, 0, 30)\n ws.set_column(1, cols, 2.1)\n 
ws.freeze_panes(2, 1)\n format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})\n # Set last row.\n ws.set_row(rows + 1, cols + 1, format_annotation)\n # Make sure that row/column locations don't overlap.\n ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})\n ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})\n format_rotation = writer_book.add_format({})\n format_rotation.set_rotation(90)\n for column_num, column_name in enumerate(list(table_df.columns)):\n ws.write(0, column_num + 1, column_name, format_rotation)\n format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})\n # Set last row.\n ws.set_row(rows, 400, format_annotation)\n writer.save()\n\n\ndef get_annotation_dict(gbk_file):\n gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, \"genbank\"))\n annotation_dict = {}\n tmp_file = \"features.csv\"\n # Create a file of chromosomes and features.\n for chromosome in list(gbk_dict.keys()):\n with open(tmp_file, 'w+') as fh:\n for feature in gbk_dict[chromosome].features:\n if \"CDS\" in feature.type or \"rRNA\" in feature.type:\n try:\n product = feature.qualifiers['product'][0]\n except KeyError:\n product = None\n try:\n locus = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus = None\n try:\n gene = feature.qualifiers['gene'][0]\n except KeyError:\n gene = None\n fh.write(\"%s\\t%d\\t%d\\t%s\\t%s\\t%s\\n\" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))\n # Read the chromosomes and features file into a data frame.\n df = pandas.read_csv(tmp_file, sep='\\t', names=[\"chrom\", \"start\", \"stop\", \"locus\", \"product\", \"gene\"])\n # Process the data.\n df = df.sort_values(['start', 'gene'], ascending=[True, False])\n df = df.drop_duplicates('start')\n pro = df.reset_index(drop=True)\n 
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')\n annotation_dict[chromosome] = pro\n return annotation_dict\n\n\ndef get_sample_name(file_path):\n base_file_name = os.path.basename(file_path)\n if base_file_name.find(\".\") > 0:\n # Eliminate the extension.\n return os.path.splitext(base_file_name)[0]\n return base_file_name\n\n\ndef output_cascade_table(cascade_order, mqdf, group, annotation_dict):\n cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')\n output_table(cascade_order_mq, \"cascade\", group, annotation_dict)\n\n\ndef output_excel(df, type_str, group, annotation_dict, count=None):\n # Output the temporary json file that\n # is used by the excel_formatter.\n if count is None:\n if group is None:\n json_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_order_mq.json\" % type_str)\n excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_table.xlsx\" % type_str)\n else:\n json_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_%s_order_mq.json\" % (group, type_str))\n excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_%s_table.xlsx\" % (group, type_str))\n else:\n # The table has more columns than is allowed by the\n # MAXCOLS setting, so multiple files will be produced\n # as an output collection.\n if group is None:\n json_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_order_mq_%d.json\" % (type_str, count))\n excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_table_%d.xlsx\" % (type_str, count))\n else:\n json_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_%s_order_mq_%d.json\" % (group, type_str, count))\n excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, \"%s_%s_table_%d.xlsx\" % (group, type_str, count))\n df.to_json(json_file_name, orient='split')\n # Output the Excel file.\n excel_formatter(json_file_name, excel_file_name, group, annotation_dict)\n\n\ndef output_sort_table(cascade_order, mqdf, group, annotation_dict):\n sort_df = cascade_order.T\n sort_df['abs_value'] = sort_df.index\n sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)\n sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)\n sort_df.pos = sort_df.pos.astype(int)\n sort_df = sort_df.sort_values(by=['pos'])\n sort_df = sort_df.drop(['pos'], axis=1)\n sort_df = sort_df.T\n sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')\n output_table(sort_order_mq, \"sort\", group, annotation_dict)\n\n\ndef output_table(df, type_str, group, annotation_dict):\n if isinstance(group, str) and group.startswith(\"dataset\"):\n # Inputs are single files, not collections,\n # so input file names are not useful for naming\n # output files.\n group_str = None\n else:\n group_str = group\n count = 0\n chunk_start = 0\n chunk_end = 0\n column_count = df.shape[1]\n if column_count >= MAXCOLS:\n # Here the number of columns is greater than\n # the maximum allowed by Excel, so multiple\n # outputs will be produced.\n while column_count >= MAXCOLS:\n count += 1\n chunk_end += MAXCOLS\n df_of_type = df.iloc[:, chunk_start:chunk_end]\n output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)\n chunk_start += MAXCOLS\n column_count -= MAXCOLS\n count += 1\n df_of_type = df.iloc[:, chunk_start:]\n output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)\n else:\n output_excel(df, type_str, group_str, annotation_dict)\n\n\ndef preprocess_tables(task_queue, annotation_dict, timeout):\n while True:\n try:\n tup = task_queue.get(block=True, timeout=timeout)\n except queue.Empty:\n break\n newick_file, json_file, 
json_avg_mq_file = tup\n avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')\n # Map quality to dataframe.\n mqdf = avg_mq_series.to_frame(name='MQ')\n mqdf = mqdf.T\n # Get the group.\n group = get_sample_name(newick_file)\n snps_df = pandas.read_json(json_file, orient='split')\n with open(newick_file, 'r') as fh:\n for line in fh:\n line = re.sub('[:,]', '\\n', line)\n line = re.sub('[)(]', '', line)\n line = re.sub(r'[0-9].*\\.[0-9].*\\n', '', line)\n line = re.sub('root\\n', '', line)\n sample_order = line.split('\\n')\n sample_order = list([_f for _f in sample_order if _f])\n sample_order.insert(0, 'root')\n tree_order = snps_df.loc[sample_order]\n # Count number of SNPs in each column.\n snp_per_column = []\n for column_header in tree_order:\n count = 0\n column = tree_order[column_header]\n for element in column:\n if element != column[0]:\n count = count + 1\n snp_per_column.append(count)\n row1 = pandas.Series(snp_per_column, tree_order.columns, name=\"snp_per_column\")\n # Count number of SNPS from the\n # top of each column in the table.\n snp_from_top = []\n for column_header in tree_order:\n count = 0\n column = tree_order[column_header]\n # for each element in the column\n # skip the first element\n for element in column[1:]:\n if element == column[0]:\n count = count + 1\n else:\n break\n snp_from_top.append(count)\n row2 = pandas.Series(snp_from_top, tree_order.columns, name=\"snp_from_top\")\n tree_order = tree_order.append([row1])\n tree_order = tree_order.append([row2])\n # In pandas=0.18.1 even this does not work:\n # abc = row1.to_frame()\n # abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)\n # tree_order.append(abc)\n # Continue to get error: \"*** ValueError: all the input arrays must have same number of dimensions\"\n tree_order = tree_order.T\n tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])\n tree_order = tree_order.T\n # Remove snp_per_column and snp_from_top rows.\n cascade_order = tree_order[:-2]\n # Output the cascade table.\n output_cascade_table(cascade_order, mqdf, group, annotation_dict)\n # Output the sorted table.\n output_sort_table(cascade_order, mqdf, group, annotation_dict)\n task_queue.task_done()\n\n\ndef set_num_cpus(num_files, processes):\n num_cpus = len(os.sched_getaffinity(0))\n if num_files < num_cpus and num_files < processes:\n return num_files\n if num_cpus < processes:\n half_cpus = int(num_cpus / 2)\n if num_files < half_cpus:\n return num_files\n return half_cpus\n return processes\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')\n parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')\n parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')\n parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file'),\n parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')\n\n args = parser.parse_args()\n\n if args.gbk_file is not None:\n # Create the annotation_dict for annotating\n # the Excel tables.\n annotation_dict = get_annotation_dict(args.gbk_file)\n else:\n annotation_dict = None\n\n # The 
assumption here is that the list of files\n # in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are\n # named such that they are properly matched if\n # the directories contain more than 1 file (i.e.,\n # hopefully the newick file names and json file names\n # will be something like Mbovis-01D6_* so they can be\n # sorted and properly associated with each other).\n if args.input_newick is not None:\n newick_files = [args.input_newick]\n else:\n newick_files = []\n for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):\n file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))\n newick_files.append(file_path)\n if args.input_snps_json is not None:\n json_files = [args.input_snps_json]\n else:\n json_files = []\n for file_name in sorted(os.listdir(INPUT_JSON_DIR)):\n file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))\n json_files.append(file_path)\n if args.input_avg_mq_json is not None:\n json_avg_mq_files = [args.input_avg_mq_json]\n else:\n json_avg_mq_files = []\n for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):\n file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))\n json_avg_mq_files.append(file_path)\n\n multiprocessing.set_start_method('spawn')\n queue1 = multiprocessing.JoinableQueue()\n queue2 = multiprocessing.JoinableQueue()\n num_files = len(newick_files)\n cpus = set_num_cpus(num_files, args.processes)\n # Set a timeout for get()s in the queue.\n timeout = 0.05\n\n for i, newick_file in enumerate(newick_files):\n json_file = json_files[i]\n json_avg_mq_file = json_avg_mq_files[i]\n queue1.put((newick_file, json_file, json_avg_mq_file))\n\n # Complete the preprocess_tables task.\n processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]\n for p in processes:\n p.start()\n for p in processes:\n p.join()\n queue1.join()\n\n if queue1.empty():\n queue1.close()\n queue1.join_thread()\n"
] |
[
[
"pandas.IntervalIndex.from_arrays",
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"pandas.read_json",
"pandas.ExcelWriter"
]
] |
Living-with-machines/AtypicalAnimacy
|
[
"a5ba418d61fb2a3ad43d45dffe522324adaede7e"
] |
[
"code/tools/wordnet_utils.py"
] |
[
"from sentence_transformers import SentenceTransformer\nimport torch,scipy\nfrom nltk.corpus import wordnet as wn\nfrom tools import wemb_utils\nfrom gensim.models.wrappers import FastText\nfrom pathlib import Path\n\n# sent_sim_model = SentenceTransformer('models/bert-base-nli-mean-tokens')\n\n# wemb_model = FastText.load_fasttext_format('models/language_models/cc.en.300.bin')\n\ndef wordnet_animacy(token,sentence,wsd,tokenizer):\n\n # Token is animate if all senses are living things; token is inanimate if no senses are living things:\n hypernyms = lambda s: s.hypernyms()\n word_senses = wn.synsets(token)\n \n personal_pronouns = [\"i\", \"you\", \"he\", \"she\", \"we\", \"me\", \"him\", \n \"her\", \"us\", \"myself\", \"yourself\", \"himself\", \n \"herself\", \"ourselves\", \"yourselves\"]\n \n if token.lower() in personal_pronouns:\n return 1\n \n if not word_senses:\n return None\n \n else:\n all_living = True\n all_dead = True\n for ws in word_senses:\n ws_hypernyms = [hyp.name() for hyp in list(ws.closure(hypernyms))]\n if not 'living_thing.n.01' in ws_hypernyms:\n all_living = False\n if 'living_thing.n.01' in ws_hypernyms:\n all_dead = False\n\n if all_living == True:\n return 1\n elif all_dead == True:\n return 0\n \n # Otherwise, determine animacy by select best synset:\n if wsd is \"bert\":\n word_sense = bert_lesk_wsd(token,sentence)\n\n if wsd is \"wemb\":\n word_sense = w2v_lesk_wsd(token,sentence)\n\n else:\n word_sense = wn.synsets(token)[0]\n\n hypernyms = lambda s: s.hypernyms()\n hypernyms_word_sense = [x.name() for x in list(word_sense.closure(hypernyms))]\n\n if 'living_thing.n.01' in hypernyms_word_sense:\n return 1\n else:\n return 0\n\n\n\n\ndef w2v_sim_ranking(token,sentence,corpus,labels,wemb_model):\n \n proc_corpus = [\" \".join([tok for tok in sent.split(\" \") if tok!=token]) for sent in corpus]\n proc_sentence = \" \".join([tok for tok in sentence.split(\" \") if tok!=token])\n \n sent_embedding = wemb_utils.sent_embedding(proc_sentence,wemb_model).reshape(1,-1)\n \n results = []\n \n for x in range(len(proc_corpus)):\n label = labels[x]\n orig_gloss = corpus[x]\n proc_gloss = proc_corpus[x]\n \n gloss_embedding = wemb_utils.sent_embedding(proc_gloss,wemb_model).reshape(1,-1)\n try:\n sim = 1.0 - scipy.spatial.distance.cdist(sent_embedding, gloss_embedding, \"cosine\")[0]\n results.append([label,orig_gloss,sim])\n except Exception as e:\n print (e)\n print (results.append([label,orig_gloss]))\n results.append([label,orig_gloss,sim,0.0])\n continue\n \n \n results.sort(key=lambda x: x[2],reverse=True)\n\n return results\n\n \n \ndef bert_sim_ranking(token,sentence,corpus,labels,sent_sim_model):\n \n proc_corpus = [\" \".join([tok for tok in sent.split(\" \") if tok!=token]) for sent in corpus]\n proc_sentence = \" \".join([tok for tok in sentence.split(\" \") if tok!=token])\n \n sent_embedding = sent_sim_model.encode([proc_sentence])\n \n results = []\n \n for x in range(len(proc_corpus)):\n label = labels[x]\n orig_gloss = corpus[x]\n proc_gloss = proc_corpus[x]\n \n gloss_embedding = sent_sim_model.encode([proc_gloss])\n try:\n sim = 1.0 - scipy.spatial.distance.cdist(sent_embedding, gloss_embedding, \"cosine\")[0]\n results.append([label,orig_gloss,sim])\n except Exception as e:\n print (e)\n print (results.append([label,orig_gloss]))\n results.append([label,orig_gloss,sim,0.0])\n continue\n \n \n results.sort(key=lambda x: x[2],reverse=True)\n\n return results\n\n\ndef bert_lesk_wsd(token,sentence):\n synsets = wn.synsets(token)\n glosses = 
[syn.definition() for syn in synsets]\n \n pred= bert_sim_ranking(token,sentence,glosses,synsets,sent_sim_model)\n \n best_sense = pred[0][0]\n \n return best_sense\n\ndef w2v_lesk_wsd(token,sentence):\n synsets = wn.synsets(token)\n glosses = [syn.definition() for syn in synsets]\n \n pred= w2v_sim_ranking(token,sentence,glosses,synsets,wemb_model)\n \n best_sense = pred[0][0]\n \n return best_sense"
] |
[
[
"scipy.spatial.distance.cdist"
]
] |
shamanez/BERT-like-is-All-You-Need
|
[
"a6ba1f656da40103b3b5398961d18e5fc78e1efb"
] |
[
"fairseq/data/raw_audio_text_dataset.py"
] |
[
"#change the line 331\n\nimport os\nimport numpy as np\nimport sys\nimport torch\n\nfrom .import FairseqDataset\nimport random\n\nimport cv2\nfrom PIL import Image\nfrom torchvision.transforms import CenterCrop, Resize, Compose, ToTensor\n\nimport time\n\nclass RawAudioTextDataset(FairseqDataset):\n\n def __init__(self, base_path,data_args,data_split,sample_rate, max_sample_size=None, min_sample_size=None,\n shuffle=True):\n super().__init__()\n\n\n \n\n self.data_args=data_args\n\n self.sample_rate = sample_rate\n\n self.fnames_audio = []\n self.fnames_text = []\n self.sizes = []\n\n \n\n self.labels = {}\n\n self.audio_sizes = {}\n self.text_sizes = {}\n\n \n\n self.max_sample_size = max_sample_size if max_sample_size is not None else sys.maxsize\n self.min_sample_size = min_sample_size if min_sample_size is not None else self.max_sample_size\n self.base_manifest_path = base_path\n\n\n self.split = data_split\n\n \n\n if self.data_args.binary_target_iemocap: \n \n included_emotions = ['neu','ang','sad','hap','exc'] # 'exc', IEMOCAP (Max 5 emotions (only take 4 in prior work))\n \n elif self.data_args.softmax_target_meld:\n\n print(\"We are using MELD for the softmax classification\")\n\n included_emotions = ['neutral','sadness','surprise','joy','anger','fear','disgust'] #MELD (Max 7 emotion)\n #included_emotions = ['neutral','sadness','surprise','joy','anger']\n\n\n\n elif self.data_args.softmax_target_binary_meld:\n\n included_emotions = ['neutral','sadness','surprise','joy','anger','fear','disgust'] #MELD (Max 7 emotion)\n\n\n else:\n print(\"We are using MOSEI or MOSI to do a regression task\")\n\n\n manifest_audio = os.path.join(self.base_manifest_path, '{}.tsv'.format(self.split+\"_a\"))\n manifest_text = os.path.join(self.base_manifest_path, '{}.tsv'.format(self.split+\"_t\"))\n \n manifest_size = os.path.join(self.base_manifest_path, '{}.tsv'.format(self.split+\"_size\"))\n \n manifest_label = os.path.join(self.base_manifest_path, '{}.csv'.format(\"label_file_\"+self.split))\n\n \n\n with open(manifest_label, 'r') as f_l :\n self.root_dir_l = f_l.readline().strip()\n for line_l in f_l:\n\n items_l = line_l.strip().split(',')\n\n if self.data_args.regression_target_mos: \n self.labels[items_l[0].strip()] = np.round(float(items_l[1].strip()),decimals=4)\n else:\n self.labels[items_l[0].strip()] = items_l[1].strip() #for the sentiment use 2 from the list else 1\n\n\n with open(manifest_size, 'r') as f_s :\n \n for line_l in f_s:\n\n items_s = line_l.strip().split(',')\n\n \n\n self.text_sizes[items_s[0].strip()] = items_s[1].strip()\n self.audio_sizes[items_s[0].strip()] = items_s[2].strip() #for the sentiment use 2 from the list else 1\n\n \n\n inter_n=0\n with open(manifest_audio, 'r') as f_a, open(manifest_text, 'r') as f_t:#, open(manifest_label, 'r') as f_l:\n self.root_dir_a =os.path.join(self.data_args.data_raw , data_split ,'audio_token') #f_a.readline().strip()\n self.root_dir_t =os.path.join(self.data_args.data_raw , data_split ,'text') #f_t.readline().strip()\n\n \n\n # if self.split=='train':\n # self.root_dir_a =os.path.join('/hpc/gsir059/INTERSPEECH/iemocap_data' , data_split ,'audio_token') #f_a.readline().strip()\n # self.root_dir_t =os.path.join('/hpc/gsir059/INTERSPEECH/iemocap_data' , data_split ,'text') #f_t.readline().strip()\n\n \n for line_a, line_t in zip(f_a,f_t):#,f_l):, line_l\n\n \n \n items_a = line_a.strip().split('\\t')\n items_t = line_t.strip().split('\\t')\n \n \n \n\n # inter_n=inter_n+1\n\n # if self.split=='train':\n\n # if inter_n>32:\n # 
break\n\n \n\n assert items_a[0].split('.')[0] == items_t[0].split('.')[0] , \"misalignment of data\"\n\n\n \n emotion = self.labels.get(items_a[0].split('.')[0]) #If the label is not there, gives a none\n\n \n\n\n if self.data_args.regression_target_mos:\n\n if self.data_args.eval_metric:\n if emotion==0.0:\n continue \n\n self.fnames_audio.append(items_a[0].replace('.wav','.txt'))\n self.fnames_text.append(items_t[0])\n self.sizes.append(int(self.audio_sizes.get(items_a[0].split('.')[0])))\n \n \n \n \n \n else:\n\n \n if emotion in included_emotions: # Only using the subset of emotions\n\n\n self.fnames_audio.append(items_a[0].replace('.wav','.txt'))\n self.fnames_text.append(items_t[0])\n self.sizes.append(int(self.audio_sizes.get(items_a[0].split('.')[0])))\n\n \n\n if self.data_args.binary_target_iemocap:\n\n self.emotion_dictionary = { #EMOCAP\n 'neu':0,\n 'ang':2,\n 'hap':3,\n 'sad':1,\n 'exc':3\n }\n\n if self.data_args.softmax_target_meld: \n\n self.emotion_dictionary = { #MELD\n 'anger' : 2,\n 'joy': 3,\n 'neutral': 0,\n 'sadness': 1,\n 'surprise':4,\n 'fear':5,\n 'disgust':6\n }\n\n if self.data_args.regression_target_mos:\n\n self.emotion_dictionary = { #modei senti\n '-3' : 6,\n '-2': 5,\n '-1': 4,\n '0': 0,\n '1':1,\n '2':2,\n '3':3\n }\n\n \n \n self.shuffle = shuffle\n\n\n\n\n def __getitem__(self, index):\n\n\n \n\n \n audio_file = self.fnames_audio[index]\n text_file = self.fnames_text[index]\n \n\n fname_a = os.path.join(self.root_dir_a, audio_file)\n fname_t = os.path.join(self.root_dir_t, text_file)\n\n \n file_name = audio_file.replace('.txt','')\n \n assert file_name == text_file.replace('.txt',''), \"not all file ids match\"\n\n \n\n if self.data_args.regression_target_mos: \n label = self.labels.get(file_name)\n else:\n label = self.emotion_dictionary[str(self.labels.get(file_name))]\n \n\n\n # Text data (Roberta Tokens)\n with open(fname_t, 'r') as f:\n words = []\n for line in f:\n words.extend(line.strip().split('\\t'))\n tokensized_text = [int(word) for word in words]\n tokensized_text = torch.from_numpy(np.array(tokensized_text))\n\n # Text data (Roberta Tokens)\n with open(fname_a, 'r') as f:\n words = []\n for line in f:\n words.extend(line.strip().split('\\t'))\n tokensized_audio = [int(word) for word in words]\n tokensized_audio = torch.from_numpy(np.array(tokensized_audio))\n\n\n \n return {\n 'id': index,\n 'text': tokensized_text,\n 'audio_token':tokensized_audio,\n 'target' : label,\n }\n\n def __len__(self): #Training dataset size\n return len(self.fnames_audio)\n\n def collate_tokens(self, values, pad_idx, max_target_value,eos_idx=None, left_pad=False, move_eos_to_beginning=False):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n\n size =max_target_value#max(v.size(0) for v in values) #Here the size can be fixed as 512\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n \n def copy_tensor(src, dst):\n\n \n if src.numel()>dst.numel():\n clip_src=src[:dst.numel()-1]\n src=torch.cat((clip_src, torch.tensor([2])), 0)\n\n \n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\n def collate_audio_tokens(self, values, pad_idx,max_target_value, eos_idx=None, left_pad=False, move_eos_to_beginning=False):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n\n size = 
max_target_value#max(v.size(0) for v in values) #Here the size can be fixed as 512\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n \n \n def copy_tensor(src, dst):\n if src.numel()>dst.numel():\n clip_src=src[:dst.numel()-1]\n src=torch.cat((clip_src, torch.tensor([2])), 0)\n \n \n assert dst.numel() == src.numel()\n\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\n\n\n def collater(self, samples):\n \n if len(samples) == 0:\n return {}\n\n \n \n ####################################################################\n #collater for text chunks \n #############################################\n sources_text = [s['text'] for s in samples]\n sizes_text = [len(s) for s in sources_text]\n max_target_size_t = min(max(sizes_text), 512) # max text token seq length\n\n\n collated_text = self.collate_tokens(sources_text, 1,max_target_size_t) #1 is the padding index\n\n\n \n ####################################################################\n #collater for audio token chunks \n #############################################\n sources_audio_tokens = [s['audio_token'] for s in samples]\n sizes_audio = [len(s) for s in sources_audio_tokens]\n max_target_size_a = min(max(sizes_audio), 2048) # max audio token seq length\n\n collated_audio_tokens = self.collate_audio_tokens(sources_audio_tokens, 1,max_target_size_a) #1 is the padding index\n\n\n return {\n 'id': torch.LongTensor([s['id'] for s in samples]),\n 'split':self.split,\n 'net_input': {\n 'audio': collated_audio_tokens, \n 'text': collated_text, \n },\n #'target': torch.LongTensor([int(s['target']) for s in samples])\n 'target': torch.FloatTensor([float(s['target']) for s in samples]) #onlt mosei\n }\n\n\n def get_dummy_batch(\n self, num_tokens, max_positions, src_lne=2048, tgt_len=128,\n ):\n \"\"\"Return a dummy batch with a given number of tokens.\"\"\"\n if isinstance(max_positions, float) or isinstance(max_positions, int):\n src_len = min(src_len, max_positions)\n bsz = num_tokens // src_len\n \n return self.collater([\n {\n 'id': i,\n 'audio': torch.rand(self.channels, self.timeDepth, self.xSize, self.ySize),\n 'text': torch.rand(src_len),\n 'video' : torch.rand(src_len)\n }\n for i in range(bsz)\n ])\n\n def num_tokens(self, index):\n return self.size(index)\n\n def size(self, index):\n\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return min(self.sizes[index], self.max_sample_size)\n\n\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n\n if self.shuffle:\n order = [np.random.permutation(len(self))]\n else:\n order = [np.arange(len(self))]\n\n order.append(self.sizes)\n return np.lexsort(order)\n"
] |
[
[
"torch.LongTensor",
"numpy.lexsort",
"torch.tensor",
"torch.rand",
"numpy.array"
]
] |